- update vendor

master
李光春 1 year ago
parent 5ac7b3ef0a
commit fdd6dfbc63

@ -6,11 +6,11 @@ require (
github.com/MercuryEngineering/CookieMonster v0.0.0-20180304172713-1584578b3403
github.com/aliyun/aliyun-oss-go-sdk v2.2.7+incompatible
github.com/allegro/bigcache/v3 v3.1.0
github.com/baidubce/bce-sdk-go v0.9.148
github.com/baidubce/bce-sdk-go v0.9.149
github.com/basgys/goxml2json v1.1.0
github.com/bytedance/sonic v1.8.7
github.com/gin-gonic/gin v1.9.0
github.com/go-co-op/gocron v1.22.2
github.com/go-co-op/gocron v1.23.0
github.com/go-playground/locales v0.14.1
github.com/go-playground/universal-translator v0.18.1
github.com/go-playground/validator/v10 v10.12.0
@ -23,7 +23,7 @@ require (
github.com/mvdan/xurls v1.1.0
github.com/natefinch/lumberjack v2.0.0+incompatible
github.com/oschwald/geoip2-golang v1.8.0
github.com/qiniu/go-sdk/v7 v7.14.0
github.com/qiniu/go-sdk/v7 v7.15.0
github.com/redis/go-redis/v9 v9.0.3
github.com/robfig/cron/v3 v3.0.1
github.com/saracen/go7z v0.0.0-20191010121135-9c09b6bd7fda
@ -38,7 +38,7 @@ require (
gorm.io/datatypes v1.2.0
gorm.io/driver/mysql v1.5.0
gorm.io/driver/postgres v1.5.0
gorm.io/gen v0.3.21
gorm.io/gen v0.3.22
gorm.io/gorm v1.25.0
xorm.io/xorm v1.3.2
)

@ -32,8 +32,8 @@ github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6l
github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU=
github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
github.com/baidubce/bce-sdk-go v0.9.148 h1:p9HMHnOVG/v6nK7hYHkCnkJyv41KCxuuWmUM8bIpkac=
github.com/baidubce/bce-sdk-go v0.9.148/go.mod h1:zbYJMQwE4IZuyrJiFO8tO8NbtYiKTFTbwh4eIsqjVdg=
github.com/baidubce/bce-sdk-go v0.9.149 h1:vsV/Ic+sjPQDCKVQisxJmRjJwZP/DTZUIf5IfNFb2EA=
github.com/baidubce/bce-sdk-go v0.9.149/go.mod h1:zbYJMQwE4IZuyrJiFO8tO8NbtYiKTFTbwh4eIsqjVdg=
github.com/basgys/goxml2json v1.1.0 h1:4ln5i4rseYfXNd86lGEB+Vi652IsIXIvggKM/BhUKVw=
github.com/basgys/goxml2json v1.1.0/go.mod h1:wH7a5Np/Q4QoECFIU8zTQlZwZkrilY0itPfecMw41Dw=
github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
@ -106,8 +106,8 @@ github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE
github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
github.com/gin-gonic/gin v1.9.0 h1:OjyFBKICoexlu99ctXNR2gg+c5pKrKMuyjgARg9qeY8=
github.com/gin-gonic/gin v1.9.0/go.mod h1:W1Me9+hsUSyj3CePGrd1/QrKJMSJ1Tu/0hFEH89961k=
github.com/go-co-op/gocron v1.22.2 h1:5+486wUbSp2Tgodv3Fwek0OgMK/aqjcgGBcRTcT2kgs=
github.com/go-co-op/gocron v1.22.2/go.mod h1:UqVyvM90I1q/R1qGEX6cBORI6WArLuEgYlbncLMvzRM=
github.com/go-co-op/gocron v1.23.0 h1:cD8PCSsa88HKJSC8XhSWATSEKdgfKjrlnDu8zX+Jce4=
github.com/go-co-op/gocron v1.23.0/go.mod h1:gEQbrsoOV+HAp59D3LmYFgENQDeYp2QHsHT8N/Wzs/U=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o=
@ -447,8 +447,8 @@ github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
github.com/qiniu/dyn v1.3.0/go.mod h1:E8oERcm8TtwJiZvkQPbcAh0RL8jO1G0VXJMW3FAWdkk=
github.com/qiniu/go-sdk/v7 v7.14.0 h1:6icihMTKHoKMmeU1mqtIoHUv7c1LrLjYm8wTQaYDqmw=
github.com/qiniu/go-sdk/v7 v7.14.0/go.mod h1:btsaOc8CA3hdVloULfFdDgDc+g4f3TDZEFsDY0BLE+w=
github.com/qiniu/go-sdk/v7 v7.15.0 h1:vkxZZHM2Ed0qHeIx7NF3unXav+guaVIXlEsCCkpQAww=
github.com/qiniu/go-sdk/v7 v7.15.0/go.mod h1:nqoYCNo53ZlGA521RvRethvxUDvXKt4gtYXOwye868w=
github.com/qiniu/x v1.10.5/go.mod h1:03Ni9tj+N2h2aKnAz+6N0Xfl8FwMEDRC2PAlxekASDs=
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/redis/go-redis/v9 v9.0.3 h1:+7mmR26M0IvyLxGZUHxu4GiBkJkVDid0Un+j4ScYu4k=
@ -575,7 +575,7 @@ go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ=
go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A=
go.uber.org/goleak v1.1.11 h1:wy28qYRKZgnJTxGxvye5/wgWr1EKjmUDGYox5mGlRlI=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
@ -609,6 +609,7 @@ golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5y
golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
golang.org/x/crypto v0.8.0 h1:pd9TJtTueMTVQXzk8E2XESSMQDj/U7OUu0PqJqPXQjQ=
golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE=
@ -646,6 +647,7 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v
golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM=
golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
@ -658,7 +660,6 @@ golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
@ -695,7 +696,6 @@ golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210902050250-f475640dd07b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211020174200-9d6173849985/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@ -703,6 +703,7 @@ golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@ -711,6 +712,7 @@ golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.7.0 h1:BEvjmm5fURWqcfbSKTdpkDXYBrUS1c0m8agp14W48vQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@ -721,6 +723,7 @@ golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.3.8-0.20211105212822-18b340fc7af2/go.mod h1:EFNZuWvGYxIRUEX+K8UmCFwYmZjqcrnq15ZuVldZkZ0=
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
@ -816,8 +819,8 @@ gorm.io/driver/postgres v1.5.0/go.mod h1:FUZXzO+5Uqg5zzwzv4KK49R8lvGIyscBOqYrtI1
gorm.io/driver/sqlite v1.4.2/go.mod h1:0Aq3iPO+v9ZKbcdiz8gLWRw5VOPcBOPUQJFLq5e2ecI=
gorm.io/driver/sqlite v1.4.3 h1:HBBcZSDnWi5BW3B3rwvVTc510KGkBkexlOg0QrmLUuU=
gorm.io/driver/sqlserver v1.4.1 h1:t4r4r6Jam5E6ejqP7N82qAJIJAht27EGT41HyPfXRw0=
gorm.io/gen v0.3.21 h1:t8329wT4tW1ZZWOm7vn4LV6OIrz8a5zCg+p78ezt+rA=
gorm.io/gen v0.3.21/go.mod h1:aWgvoKdG9f8Des4TegSa0N5a+gwhGsFo0JJMaLwokvk=
gorm.io/gen v0.3.22 h1:K7u5tCyaZfe1cbQFD8N2xrTqUuqximNFSRl7zOFPq+M=
gorm.io/gen v0.3.22/go.mod h1:dQcELeF/7Kf82M6AQF+O/rKT5r1sjv49TlGz0cerPn4=
gorm.io/gorm v1.23.8/go.mod h1:l2lP/RyAtc1ynaTjFksBde/O8v9oOGIApu2/xRitmZk=
gorm.io/gorm v1.24.0/go.mod h1:DVrVomtaYTbqs7gB/x2uVvqnXzv0nqjB396B8cG4dBA=
gorm.io/gorm v1.24.3/go.mod h1:DVrVomtaYTbqs7gB/x2uVvqnXzv0nqjB396B8cG4dBA=

@ -26,7 +26,7 @@ import (
// Constants and default values for the package bce
const (
SDK_VERSION = "0.9.148"
SDK_VERSION = "0.9.149"
URI_PREFIX = "/" // now support uri without prefix "v1" so just set root path
DEFAULT_DOMAIN = "baidubce.com"
DEFAULT_PROTOCOL = "http"

@ -3,22 +3,29 @@
[![CI State](https://github.com/go-co-op/gocron/actions/workflows/go_test.yml/badge.svg?branch=main&event=push)](https://github.com/go-co-op/gocron/actions)
![Go Report Card](https://goreportcard.com/badge/github.com/go-co-op/gocron) [![Go Doc](https://godoc.org/github.com/go-co-op/gocron?status.svg)](https://pkg.go.dev/github.com/go-co-op/gocron)
gocron is a job scheduling package which lets you run Go functions at pre-determined intervals using a simple, human-friendly syntax.
gocron is a job scheduling package which lets you run Go functions at pre-determined intervals
using a simple, human-friendly syntax.
gocron is a Golang scheduler implementation similar to the Ruby module [clockwork](https://github.com/tomykaira/clockwork) and the Python job scheduling package [schedule](https://github.com/dbader/schedule).
gocron is a Golang scheduler implementation similar to the Ruby module
[clockwork](https://github.com/tomykaira/clockwork) and the Python job scheduling package [schedule](https://github.com/dbader/schedule).
See also these two great articles that were used for design input:
- [Rethinking Cron](http://adam.herokuapp.com/past/2010/4/13/rethinking_cron/)
- [Replace Cron with Clockwork](http://adam.herokuapp.com/past/2010/6/30/replace_cron_with_clockwork/)
If you want to chat, you can find us at Slack! [<img src="https://img.shields.io/badge/gophers-gocron-brightgreen?logo=slack">](https://gophers.slack.com/archives/CQ7T0T1FW)
If you want to chat, you can find us at Slack!
[<img src="https://img.shields.io/badge/gophers-gocron-brightgreen?logo=slack">](https://gophers.slack.com/archives/CQ7T0T1FW)
## Concepts
- **Scheduler**: The scheduler tracks all the jobs assigned to it and makes sure they are passed to the executor when ready to be run. The scheduler is able to manage overall aspects of job behavior like limiting how many jobs are running at one time.
- **Job**: The job is simply aware of the task (go function) it's provided and is therefore only able to perform actions related to that task like preventing itself from overruning a previous task that is taking a long time.
- **Executor**: The executor, as it's name suggests, is simply responsible for calling the task (go function) that the job hands to it when sent by the scheduler.
- **Scheduler**: The scheduler tracks all the jobs assigned to it and makes sure they are passed to the executor when
ready to be run. The scheduler is able to manage overall aspects of job behavior like limiting how many jobs
are running at one time.
- **Job**: The job is simply aware of the task (go function) it's provided and is therefore only able to perform
actions related to that task like preventing itself from overrunning a previous task that is taking a long time.
- **Executor**: The executor, as its name suggests, is simply responsible for calling the task (go function) that
the job hands to it when sent by the scheduler.
## Examples
@ -110,6 +117,50 @@ s.RunByTag("tag")
- Q: I've removed my job from the scheduler, but how can I stop a long-running job that has already been triggered?
- A: We recommend using a means of canceling your job, e.g. a `context.WithCancel()`.
- A2: You can listen to the job context Done channel to know when the job has been canceled
```golang
task := func(in string, job gocron.Job) {
fmt.Printf("this job's last run: %s this job's next run: %s\n", job.LastRun(), job.NextRun())
fmt.Printf("in argument is %s\n", in)
ticker := time.NewTicker(100 * time.Millisecond)
defer ticker.Stop()
for {
select {
case <-job.Context().Done():
fmt.Printf("function has been canceled, performing cleanup and exiting gracefully\n")
return
case <-ticker.C:
fmt.Printf("performing a hard job that takes a long time that I want to kill whenever I want\n")
}
}
}
var err error
s := gocron.NewScheduler(time.UTC)
s.SingletonModeAll()
j, err := s.Every(1).Hour().Tag("myJob").DoWithJobDetails(task, "foo")
if err != nil {
log.Fatalln("error scheduling job", err)
}
s.StartAsync()
// Simulate some more work
time.Sleep(time.Second)
// I want to stop the job, together with the underlying goroutine
fmt.Printf("now I want to kill the job\n")
err = s.RemoveByTag("myJob")
if err != nil {
log.Fatalln("error removing job by tag", err)
}
// Wait a bit so that we can see that the job is exiting gracefully
time.Sleep(time.Second)
fmt.Printf("Job: %#v, Error: %#v", j, err)
```
---

@ -3,8 +3,7 @@ package gocron
import (
"context"
"sync"
"golang.org/x/sync/semaphore"
"sync/atomic"
)
const (
@ -21,27 +20,46 @@ const (
// job execution order isn't guaranteed. For example, a job that
// executes frequently may pile up in the wait queue and be executed
// many times back to back when the queue opens.
//
// Warning: do not use this mode if your jobs will continue to stack
// up beyond the ability of the limit workers to keep up. An example of
// what NOT to do:
//
// s.Every("1s").Do(func() {
// // this will result in an ever-growing number of goroutines
// // blocked trying to send to the buffered channel
// time.Sleep(10 * time.Minute)
// })
WaitMode
)
type executor struct {
jobFunctions chan jobFunction // the chan upon which the jobFunctions are passed in from the scheduler
ctx context.Context // used to tell the executor to stop
cancel context.CancelFunc // used to tell the executor to stop
wg *sync.WaitGroup // used by the scheduler to wait for the executor to stop
jobsWg *sync.WaitGroup // used by the executor to wait for all jobs to finish
singletonWgs *sync.Map // used by the executor to wait for the singleton runners to complete
limitMode limitMode // when SetMaxConcurrentJobs() is set upon the scheduler
maxRunningJobs *semaphore.Weighted
jobFunctions chan jobFunction // the chan upon which the jobFunctions are passed in from the scheduler
ctx context.Context // used to tell the executor to stop
cancel context.CancelFunc // used to tell the executor to stop
wg *sync.WaitGroup // used by the scheduler to wait for the executor to stop
jobsWg *sync.WaitGroup // used by the executor to wait for all jobs to finish
singletonWgs *sync.Map // used by the executor to wait for the singleton runners to complete
limitMode limitMode // when SetMaxConcurrentJobs() is set upon the scheduler
limitModeMaxRunningJobs int // stores the maximum number of concurrently running jobs
limitModeFuncsRunning *atomic.Int64 // tracks the count of limited mode funcs running
limitModeFuncWg *sync.WaitGroup // allow the executor to wait for limit mode functions to wrap up
limitModeQueue chan jobFunction // pass job functions to the limit mode workers
limitModeRunningJobs *atomic.Int64 // tracks the count of running jobs to check against the max
stopped *atomic.Bool // allow workers to drain the buffered limitModeQueue
}
func newExecutor() executor {
e := executor{
jobFunctions: make(chan jobFunction, 1),
singletonWgs: &sync.Map{},
wg: &sync.WaitGroup{},
jobFunctions: make(chan jobFunction, 1),
singletonWgs: &sync.Map{},
limitModeFuncsRunning: &atomic.Int64{},
limitModeFuncWg: &sync.WaitGroup{},
limitModeRunningJobs: &atomic.Int64{},
limitModeQueue: make(chan jobFunction, 1000),
}
e.wg.Add(1)
return e
}
@ -63,20 +81,66 @@ func (jf *jobFunction) singletonRunner() {
case <-jf.ctx.Done():
jf.singletonWg.Done()
jf.singletonRunnerOn.Store(false)
jf.singletonQueue = make(chan struct{}, 1000)
jf.stopped.Store(false)
return
default:
if jf.singletonQueue.Load() != 0 {
case <-jf.singletonQueue:
if !jf.stopped.Load() {
runJob(*jf)
jf.singletonQueue.Add(-1)
}
}
}
}
// limitModeRunner is one worker goroutine used when a max-concurrent-jobs
// limit is set. It pulls job functions off the shared limitModeQueue and
// runs them serially, so total concurrency is bounded by the number of
// workers started (see the limitModeMaxRunningJobs handling in run()).
func (e *executor) limitModeRunner() {
// Register with limitModeFuncWg so stop() can wait for all limit-mode
// workers to finish draining before returning.
e.limitModeFuncWg.Add(1)
for {
select {
case <-e.ctx.Done():
// Executor is shutting down: deregister this worker from both the
// running-funcs counter and the wait group, then exit.
e.limitModeFuncsRunning.Add(-1)
e.limitModeFuncWg.Done()
return
case jf := <-e.limitModeQueue:
// Keep receiving so the buffered queue drains, but skip execution
// once the executor has been stopped — queued jobs are discarded.
if !e.stopped.Load() {
runJob(jf)
}
}
}
}
// start (re)initializes the executor's lifecycle state and launches the
// main run() loop in its own goroutine. All fields must be set before
// run() starts, since run() reads ctx, jobsWg and stopped immediately.
func (e *executor) start() {
// Fresh WaitGroup each start so a stopped executor can be restarted;
// the single Add(1) is matched by run() when it exits.
e.wg = &sync.WaitGroup{}
e.wg.Add(1)
// Cancelable context used by stop() to signal run() and the
// limit-mode workers to shut down.
stopCtx, cancel := context.WithCancel(context.Background())
e.ctx = stopCtx
e.cancel = cancel
// Tracks in-flight job functions so stop() can wait for them.
e.jobsWg = &sync.WaitGroup{}
// Cleared stop flag; set by stop() to make workers drop queued jobs.
e.stopped = &atomic.Bool{}
go e.run()
}
func (e *executor) run() {
for {
select {
case f := <-e.jobFunctions:
if e.stopped.Load() {
continue
}
if e.limitModeMaxRunningJobs > 0 {
countRunning := e.limitModeFuncsRunning.Load()
if countRunning < int64(e.limitModeMaxRunningJobs) {
diff := int64(e.limitModeMaxRunningJobs) - countRunning
for i := int64(0); i < diff; i++ {
go e.limitModeRunner()
e.limitModeFuncsRunning.Add(1)
}
}
}
e.jobsWg.Add(1)
go func() {
defer e.jobsWg.Done()
@ -92,28 +156,16 @@ func (e *executor) start() {
}()
}
if e.maxRunningJobs != nil {
if !e.maxRunningJobs.TryAcquire(1) {
switch e.limitMode {
case RescheduleMode:
return
case WaitMode:
select {
case <-e.ctx.Done():
return
case <-f.ctx.Done():
return
default:
}
if err := e.maxRunningJobs.Acquire(f.ctx, 1); err != nil {
break
}
if e.limitModeMaxRunningJobs > 0 {
switch e.limitMode {
case RescheduleMode:
if e.limitModeRunningJobs.Load() < int64(e.limitModeMaxRunningJobs) {
e.limitModeQueue <- f
}
case WaitMode:
e.limitModeQueue <- f
}
defer e.maxRunningJobs.Release(1)
return
}
switch f.runConfig.mode {
@ -125,8 +177,7 @@ func (e *executor) start() {
if !f.singletonRunnerOn.Load() {
go f.singletonRunner()
}
f.singletonQueue.Add(1)
f.singletonQueue <- struct{}{}
}
}()
case <-e.ctx.Done():
@ -138,6 +189,7 @@ func (e *executor) start() {
}
func (e *executor) stop() {
e.stopped.Store(true)
e.cancel()
e.wg.Wait()
if e.singletonWgs != nil {
@ -148,4 +200,7 @@ func (e *executor) stop() {
return true
})
}
if e.limitModeMaxRunningJobs > 0 {
e.limitModeFuncWg.Wait()
}
}

@ -47,7 +47,7 @@ type jobFunction struct {
parametersLen int // length of the passed parameters
name string // nolint the function name to run
runConfig runConfig // configuration for how many times to run the job
singletonQueue *atomic.Int64 // limits inflight runs of a job to one
singletonQueue chan struct{} // queues jobs for the singleton runner to handle
singletonRunnerOn *atomic.Bool // whether the runner function for singleton is running
ctx context.Context // for cancellation
cancel context.CancelFunc // for cancellation
@ -55,6 +55,7 @@ type jobFunction struct {
runStartCount *atomic.Int64 // number of times the job was started
runFinishCount *atomic.Int64 // number of times the job was finished
singletonWg *sync.WaitGroup // used by singleton runner
stopped *atomic.Bool
}
type eventListeners struct {
@ -82,6 +83,7 @@ func (jf *jobFunction) copy() jobFunction {
runFinishCount: jf.runFinishCount,
singletonWg: jf.singletonWg,
singletonRunnerOn: jf.singletonRunnerOn,
stopped: jf.stopped,
}
cp.parameters = append(cp.parameters, jf.parameters...)
return cp
@ -120,6 +122,7 @@ func newJob(interval int, startImmediately bool, singletonMode bool) *Job {
runStartCount: &atomic.Int64{},
runFinishCount: &atomic.Int64{},
singletonRunnerOn: &atomic.Bool{},
stopped: &atomic.Bool{},
},
tags: []string{},
startsImmediately: startImmediately,
@ -290,6 +293,11 @@ func (j *Job) Error() error {
return j.error
}
// Context returns the job's context. The context controls cancellation:
// it is canceled when the job is stopped or removed, so a task scheduled
// via DoWithJobDetails can select on Context().Done() to perform cleanup
// and exit gracefully (see the FAQ example in the README).
func (j *Job) Context() context.Context {
return j.ctx
}
// Tag allows you to add arbitrary labels to a Job that do not
// impact the functionality of the Job
func (j *Job) Tag(tags ...string) {
@ -393,8 +401,8 @@ func (j *Job) SingletonMode() {
j.mu.Lock()
defer j.mu.Unlock()
j.runConfig.mode = singletonMode
j.jobFunction.singletonQueue = &atomic.Int64{}
j.jobFunction.singletonWg = &sync.WaitGroup{}
j.jobFunction.singletonQueue = make(chan struct{}, 100)
}
// shouldRun evaluates if this job should run again
@ -451,7 +459,9 @@ func (j *Job) stop() {
}
if j.cancel != nil {
j.cancel()
j.ctx, j.cancel = context.WithCancel(context.Background())
}
j.stopped.Store(true)
}
// IsRunning reports whether any instances of the job function are currently running

@ -10,7 +10,6 @@ import (
"time"
"github.com/robfig/cron/v3"
"golang.org/x/sync/semaphore"
)
type limitMode int8
@ -69,8 +68,10 @@ func NewScheduler(loc *time.Location) *Scheduler {
// SetMaxConcurrentJobs limits how many jobs can be running at the same time.
// This is useful when running resource intensive jobs and a precise start time is not critical.
//
// Note: WaitMode and RescheduleMode provide details on usage and potential risks.
func (s *Scheduler) SetMaxConcurrentJobs(n int, mode limitMode) {
s.executor.maxRunningJobs = semaphore.NewWeighted(int64(n))
s.executor.limitModeMaxRunningJobs = n
s.executor.limitMode = mode
}
@ -93,12 +94,7 @@ func (s *Scheduler) StartAsync() {
// start starts the scheduler, scheduling and running jobs
func (s *Scheduler) start() {
stopCtx, cancel := context.WithCancel(context.Background())
s.executor.ctx = stopCtx
s.executor.cancel = cancel
s.executor.jobsWg = &sync.WaitGroup{}
go s.executor.start()
s.executor.start()
s.setRunning(true)
s.runJobs(s.Jobs())
}
@ -595,7 +591,7 @@ func (s *Scheduler) runContinuous(job *Job) {
}
nr := next.dateTime.Sub(s.now())
if nr < 0 {
time.Sleep(absDuration(nr))
job.setLastRun(s.now())
shouldRun, next := s.scheduleNextRun(job)
if !shouldRun {
return
@ -653,10 +649,14 @@ func (s *Scheduler) RunByTagWithDelay(tag string, d time.Duration) error {
// Remove specific Job by function
//
// Removing a job stops that job's timer. However, if a job has already
// been started by by the job's timer before being removed, there is no way to stop
// it through gocron as https://pkg.go.dev/time#Timer.Stop explains.
// The job function would need to have implemented a means of
// stopping, e.g. using a context.WithCancel().
// been started by the job's timer before being removed, the only way to stop
// it through gocron is to use DoWithJobDetails and access the job's Context which
// informs you when the job has been canceled.
//
// Alternatively, the job function would need to have implemented a means of
// stopping, e.g. using a context.WithCancel() passed as params to Do method.
//
// The above are based on what the underlying library suggests https://pkg.go.dev/time#Timer.Stop.
func (s *Scheduler) Remove(job any) {
fName := getFunctionName(job)
j := s.findJobByTaskName(fName)
@ -786,6 +786,16 @@ func (s *Scheduler) LimitRunsTo(i int) *Scheduler {
// SingletonMode prevents a new job from starting if the prior job has not yet
// completed its run
//
// Warning: do not use this mode if your jobs will continue to stack
// up beyond the ability of the limit workers to keep up. An example of
// what NOT to do:
//
// s.Every("1s").SingletonMode().Do(func() {
// // this will result in an ever-growing number of goroutines
// // blocked trying to send to the buffered channel
// time.Sleep(10 * time.Minute)
// })
func (s *Scheduler) SingletonMode() *Scheduler {
job := s.getCurrentJob()
job.SingletonMode()
@ -794,6 +804,19 @@ func (s *Scheduler) SingletonMode() *Scheduler {
// SingletonModeAll prevents new jobs from starting if the prior instance of the
// particular job has not yet completed its run
//
// Warning: do not use this mode if your jobs will continue to stack
// up beyond the ability of the limit workers to keep up. An example of
// what NOT to do:
//
// s := gocron.NewScheduler(time.UTC)
// s.SingletonModeAll()
//
// s.Every("1s").Do(func() {
// // this will result in an ever-growing number of goroutines
// // blocked trying to send to the buffered channel
// time.Sleep(10 * time.Minute)
// })
func (s *Scheduler) SingletonModeAll() {
// Records the scheduler-wide singleton preference; presumably consulted
// when new jobs are created so each gets SingletonMode applied —
// the consuming code path is not visible here, confirm in job creation.
s.singletonMode = true
}
@ -1252,7 +1275,6 @@ func (s *Scheduler) Update() (*Job, error) {
}
s.updateJob = false
job.stop()
job.ctx, job.cancel = context.WithCancel(context.Background())
job.setStartsImmediately(false)
if job.runWithDetails {

@ -1,4 +1,13 @@
# Changelog
## 7.15.0
* 优化
* 表单上传 Put 方法的 PutExtra 参数,支持传入 nil
* Bucket 镜像源/配额方法内部请求使用 UC 域名
* BucketManager ListBucket 和 ListBucketContext 方法内部接口由 /v2/list 调用调整为 /list
* 新增
* BucketManager 新增批量方法 BatchWithContext
* BucketManager 增加 Bucket 列举方法 ListFileWithContext
## 7.14.0
* Go SDK 对七牛回调请求的鉴权验证函数支持 Qiniu 签名
* UploadMananger 支持双活区域

@ -17,7 +17,7 @@ github.com/qiniu/go-sdk
在您的项目中的 `go.mod` 文件内添加这行代码
```
require github.com/qiniu/go-sdk/v7 v7.14.0
require github.com/qiniu/go-sdk/v7 v7.15.0
```
并且在项目中使用 `"github.com/qiniu/go-sdk/v7"` 引用 Qiniu Go SDK。

@ -5,7 +5,7 @@ import (
"strings"
)
const Version = "7.14.0"
const Version = "7.15.0"
const (
CONTENT_TYPE_JSON = "application/json"

@ -8,14 +8,9 @@ package storage
import (
"context"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"os"
"strconv"
"strings"
"github.com/qiniu/go-sdk/v7/auth"
@ -33,21 +28,11 @@ const (
// FileInfo 文件基本信息
type FileInfo struct {
// 文件的HASH值使用hash值算法计算。
Hash string `json:"hash"`
// 资源内容的大小,单位:字节。
Fsize int64 `json:"fsize"`
// 上传时间单位100纳秒其值去掉低七位即为Unix时间戳。
PutTime int64 `json:"putTime"`
/**
* //
* 1
* 2
*/
RestoreStatus int `json:"restoreStatus"`
// 文件的HASH值使用hash值算法计算。
Hash string `json:"hash"`
// 资源的 MIME 类型。
MimeType string `json:"mimeType"`
@ -61,10 +46,15 @@ type FileInfo struct {
*/
Type int `json:"type"`
// 上传时间单位100纳秒其值去掉低七位即为Unix时间戳。
PutTime int64 `json:"putTime"`
/**
* endUser
* //
* 1
* 2
*/
EndUser string `json:"endUser"`
RestoreStatus int `json:"restoreStatus"`
/**
*
@ -78,6 +68,11 @@ type FileInfo struct {
*/
Md5 string `json:"md5"`
/**
* endUser
*/
EndUser string `json:"endUser"`
/**
* int64 Unix
*
@ -157,12 +152,92 @@ func (r *FetchRet) String() string {
type BatchOpRet struct {
Code int `json:"code,omitempty"`
Data struct {
Hash string `json:"hash"`
Fsize int64 `json:"fsize"`
PutTime int64 `json:"putTime"`
// 资源内容的大小,单位:字节。
Fsize int64 `json:"fsize"`
// 文件的HASH值使用hash值算法计算。
Hash string `json:"hash"`
// 资源的 MIME 类型。
MimeType string `json:"mimeType"`
Type int `json:"type"`
Error string `json:"error"`
/**
*
* 0
* 1
* 2
* 3
*/
Type int `json:"type"`
// 上传时间单位100纳秒其值去掉低七位即为Unix时间戳。
PutTime int64 `json:"putTime"`
/**
* //
* 1
* 2
*/
RestoreStatus *int `json:"restoreStatus"`
/**
*
* 0
* 1
*/
Status *int `json:"status"`
/**
* md5
*/
Md5 string `json:"md5"`
/**
* endUser
*/
EndUser string `json:"endUser"`
/**
* Unix
*
* API API
* )
*
* 15687360002019/9/18
*/
Expiration *int64 `json:"expiration"`
/**
* Unix
*
* API
* )
*
* 15687360002019/9/18
*/
TransitionToIA *int64 `json:"transitionToIA"`
/**
* Unix
*
* API
* )
*
* 15687360002019/9/18
*/
TransitionToArchive *int64 `json:"transitionToARCHIVE"`
/**
* Unix
*
* API
* )
*
* 15687360002019/9/18
*/
TransitionToDeepArchive *int64 `json:"transitionToDeepArchive"`
Error string `json:"error"`
} `json:"data,omitempty"`
}
@ -375,20 +450,60 @@ func (m *BucketManager) DeleteAfterDays(bucket, key string, days int) (err error
}
// Batch 接口提供了资源管理的批量操作,支持 statcopymovedeletechgmchtypedeleteAfterDays几个接口
func (m *BucketManager) Batch(operations []string) (batchOpRet []BatchOpRet, err error) {
// 没有 bucket 参数,会从 operations 中解析出 bucket
// @param operations 操作对象列表,操作对象所属的 bucket 可能会不同,但是必须属于同一个区域
func (m *BucketManager) Batch(operations []string) ([]BatchOpRet, error) {
if len(operations) == 0 {
return nil, errors.New("operations is empty")
}
bucket := ""
for _, operation := range operations {
paths := strings.Split(operation, "/")
if len(paths) < 3 {
continue
}
// 按当前模式,第 3 个 entry 是 bucket 和 key 键值对
if b, _, err := decodedEntry(paths[2]); err != nil {
continue
} else {
bucket = b
break
}
}
if len(bucket) == 0 {
return nil, errors.New("can't get one bucket from operations")
}
return m.BatchWithContext(nil, bucket, operations)
}
// BatchWithContext 接口提供了资源管理的批量操作,支持 statcopymovedeletechgmchtypedeleteAfterDays几个接口
// @param ctx context.Context
// @param bucket operations 列表中任意一个操作对象所属的 bucket
// @param operations 操作对象列表,操作对象所属的 bucket 可能会不同,但是必须属于同一个区域
// BatchWithContext performs a batch of resource-management operations
// (stat/copy/move/delete/chgm/chtype/deleteAfterDays) against the RS host
// resolved for the given bucket, honoring the supplied context.
func (m *BucketManager) BatchWithContext(ctx context.Context, bucket string, operations []string) ([]BatchOpRet, error) {
	// Resolve the region-specific RS endpoint for this bucket first;
	// all operations in the batch must belong to the same region.
	reqHost, hostErr := m.RsReqHost(bucket)
	if hostErr != nil {
		return nil, hostErr
	}
	return m.batchOperation(ctx, reqHost, operations)
}
func (m *BucketManager) batchOperation(ctx context.Context, reqURL string, operations []string) (batchOpRet []BatchOpRet, err error) {
if len(operations) > 1000 {
err = errors.New("batch operation count exceeds the limit of 1000")
return
}
scheme := "http://"
if m.Cfg.UseHTTPS {
scheme = "https://"
}
reqURL := fmt.Sprintf("%s%s/batch", scheme, m.Cfg.CentralRsHost)
params := map[string][]string{
"op": operations,
}
err = m.Client.CredentialedCallWithForm(context.Background(), m.Mac, auth.TokenQiniu, &batchOpRet, "POST", reqURL, nil, params)
if ctx == nil {
ctx = context.Background()
}
reqURL = fmt.Sprintf("%s/batch", reqURL)
err = m.Client.CredentialedCallWithForm(ctx, m.Mac, auth.TokenQiniu, &batchOpRet, "POST", reqURL, nil, params)
return
}
@ -417,7 +532,7 @@ func (m *BucketManager) RsReqHost(bucket string) (reqHost string, err error) {
reqHost = m.Cfg.RsHost
}
if !strings.HasPrefix(reqHost, "http") {
reqHost = "http://" + reqHost
reqHost = endpoint(m.Cfg.UseHTTPS, reqHost)
}
return
}
@ -435,7 +550,7 @@ func (m *BucketManager) ApiReqHost(bucket string) (reqHost string, err error) {
reqHost = m.Cfg.ApiHost
}
if !strings.HasPrefix(reqHost, "http") {
reqHost = "http://" + reqHost
reqHost = endpoint(m.Cfg.UseHTTPS, reqHost)
}
return
}
@ -453,7 +568,7 @@ func (m *BucketManager) RsfReqHost(bucket string) (reqHost string, err error) {
reqHost = m.Cfg.RsfHost
}
if !strings.HasPrefix(reqHost, "http") {
reqHost = "http://" + reqHost
reqHost = endpoint(m.Cfg.UseHTTPS, reqHost)
}
return
}
@ -471,7 +586,7 @@ func (m *BucketManager) IoReqHost(bucket string) (reqHost string, err error) {
reqHost = m.Cfg.IoHost
}
if !strings.HasPrefix(reqHost, "http") {
reqHost = "http://" + reqHost
reqHost = endpoint(m.Cfg.UseHTTPS, reqHost)
}
return
}
@ -504,11 +619,8 @@ type DomainInfo struct {
// ListBucketDomains returns the domain names bound to the bucket.
// NOTE(review): in this revision the endpoint moved from the region api host
// (/v7/domain/list) to the uc host (/v3/domains); the old host-resolution
// lines were diff residue and are removed here.
func (m *BucketManager) ListBucketDomains(bucket string) (info []DomainInfo, err error) {
	host := getUcHost(m.Cfg.UseHTTPS)
	reqURL := fmt.Sprintf("%s/v3/domains?tbl=%s", host, bucket)
	err = m.Client.CredentialedCall(context.Background(), m.Mac, auth.TokenQiniu, &info, "GET", reqURL, nil)
	return
}
@ -527,14 +639,14 @@ func (m *BucketManager) Prefetch(bucket, key string) (err error) {
// SetImage configures siteURL as the image (mirror) source of the bucket.
// Fix: diff residue kept both the old DefaultPubHost URL and the new
// getUcHost URL; only the new one remains.
func (m *BucketManager) SetImage(siteURL, bucket string) (err error) {
	reqURL := fmt.Sprintf("%s%s", getUcHost(m.Cfg.UseHTTPS), uriSetImage(siteURL, bucket))
	err = m.Client.CredentialedCall(context.Background(), m.Mac, auth.TokenQiniu, nil, "POST", reqURL, nil)
	return
}
// SetImageWithHost 用来设置空间镜像源额外添加回源Host头部
func (m *BucketManager) SetImageWithHost(siteURL, bucket, host string) (err error) {
reqURL := fmt.Sprintf("http://%s%s", DefaultPubHost,
reqURL := fmt.Sprintf("%s%s", getUcHost(m.Cfg.UseHTTPS),
uriSetImageWithHost(siteURL, bucket, host))
err = m.Client.CredentialedCall(context.Background(), m.Mac, auth.TokenQiniu, nil, "POST", reqURL, nil)
return
@ -542,76 +654,11 @@ func (m *BucketManager) SetImageWithHost(siteURL, bucket, host string) (err erro
// UnsetImage removes the image (mirror) source setting from the bucket.
// Fix: diff residue kept both the old DefaultPubHost URL and the new
// getUcHost URL; only the new one remains.
func (m *BucketManager) UnsetImage(bucket string) (err error) {
	reqURL := fmt.Sprintf("%s%s", getUcHost(m.Cfg.UseHTTPS), uriUnsetImage(bucket))
	err = m.Client.CredentialedCall(context.Background(), m.Mac, auth.TokenQiniu, nil, "POST", reqURL, nil)
	return err
}
// ListFiles lists entries of a bucket filtered by a key prefix and a
// directory delimiter, resuming from marker, returning at most limit entries
// per call (limit must be within [1, 1000]). hasNext reports whether a
// further page can be fetched by passing nextMarker back in.
func (m *BucketManager) ListFiles(bucket, prefix, delimiter, marker string,
	limit int) (entries []ListItem, commonPrefixes []string, nextMarker string, hasNext bool, err error) {
	if limit <= 0 || limit > 1000 {
		err = errors.New("invalid list limit, only allow [1, 1000]")
		return
	}

	var reqHost string
	if reqHost, err = m.RsfReqHost(bucket); err != nil {
		return
	}

	var ret listFilesRet
	reqURL := reqHost + uriListFiles(bucket, prefix, delimiter, marker, limit)
	if err = m.Client.CredentialedCall(context.Background(), m.Mac, auth.TokenQiniu, &ret, "POST", reqURL, nil); err != nil {
		return
	}

	entries = ret.Items
	commonPrefixes = ret.CommonPrefixes
	nextMarker = ret.Marker
	hasNext = nextMarker != ""
	return
}
// ListBucket lists bucket entries filtered by a key prefix and a directory
// delimiter, streaming each record over the returned channel.
func (m *BucketManager) ListBucket(bucket, prefix, delimiter, marker string) (retCh chan listFilesRet2, err error) {
	reqHost, hostErr := m.RsfReqHost(bucket)
	if hostErr != nil {
		return nil, hostErr
	}
	// limit 0 ==> list every file
	reqURL := reqHost + uriListFiles2(bucket, prefix, delimiter, marker)
	ctx := auth.WithCredentialsType(context.Background(), m.Mac, auth.TokenQiniu)
	return callChan(m.Client, ctx, "POST", reqURL, nil)
}
// ListBucketContext lists bucket entries filtered by a key prefix and a
// directory delimiter, streaming each record over the returned channel.
// The supplied context can be used to cancel the listing.
func (m *BucketManager) ListBucketContext(ctx context.Context, bucket, prefix, delimiter, marker string) (retCh chan listFilesRet2, err error) {
	reqHost, hostErr := m.RsfReqHost(bucket)
	if hostErr != nil {
		return nil, hostErr
	}
	// limit 0 ==> list every file
	reqURL := reqHost + uriListFiles2(bucket, prefix, delimiter, marker)
	ctx = auth.WithCredentialsType(ctx, m.Mac, auth.TokenQiniu)
	return callChan(m.Client, ctx, "POST", reqURL, nil)
}
type AsyncFetchParam struct {
Url string `json:"url"`
Host string `json:"host,omitempty"`
@ -683,11 +730,6 @@ func (m *BucketManager) ApiHost(bucket string) (apiHost string, err error) {
return
}
// z0ApiHost returns the api host of the default huadong (z0) region; the
// error result is always nil and kept only for signature compatibility.
func (m *BucketManager) z0ApiHost() (apiHost string, err error) {
	return regionHuadong.GetApiHost(m.Cfg.UseHTTPS), nil
}
func (m *BucketManager) Zone(bucket string) (z *Zone, err error) {
if m.Cfg.Zone != nil {
@ -774,43 +816,25 @@ func uriUnsetImage(bucket string) string {
return fmt.Sprintf("/unimage/%s", bucket)
}
func uriListFiles(bucket, prefix, delimiter, marker string, limit int) string {
query := make(url.Values)
query.Add("bucket", bucket)
if prefix != "" {
query.Add("prefix", prefix)
}
if delimiter != "" {
query.Add("delimiter", delimiter)
}
if marker != "" {
query.Add("marker", marker)
}
if limit > 0 {
query.Add("limit", strconv.FormatInt(int64(limit), 10))
}
return fmt.Sprintf("/list?%s", query.Encode())
// EncodedEntry returns the URL-safe base64 encoding of "<bucket>:<key>".
func EncodedEntry(bucket, key string) string {
	return base64.URLEncoding.EncodeToString([]byte(bucket + ":" + key))
}
// decodedEntry decodes a URL-safe base64 "<bucket>:<key>" entry (as produced
// by EncodedEntry) back into its bucket and key parts. An entry without a
// ':' separator yields an empty key; a key containing further ':' characters
// is truncated at the second separator (behavior kept from the original).
// Fix: diff residue had interleaved this function with the old uriListFiles2
// body and a duplicated copy of EncodedEntry; only decodedEntry remains.
func decodedEntry(entry string) (bucket, key string, err error) {
	value, dErr := base64.URLEncoding.DecodeString(entry)
	if dErr != nil {
		return "", "", dErr
	}
	bk := strings.Split(string(value), ":")
	if len(bk) == 0 {
		return "", "", errors.New("entry format error")
	}
	if len(bk) == 1 {
		return bk[0], "", nil
	}
	return bk[0], bk[1], nil
}
// EncodedEntryWithoutKey 生成 key 为null的情况下 URL Safe Base64编码的Entry
@ -909,86 +933,3 @@ func urlEncodeQuery(str string) (ret string) {
str = strings.Replace(str, "+", "%20", -1)
return str
}
// listFilesRet2 is one streamed record of a v2 (/v2/list) listing: a file
// entry (Item) or a directory prefix (Dir).
type listFilesRet2 struct {
	Marker string   `json:"marker"`
	Item   ListItem `json:"item"`
	Dir    string   `json:"dir"`
}

// listFilesRet is the response body of a /list request.
type listFilesRet struct {
	Marker         string     `json:"marker"`
	Items          []ListItem `json:"items"`
	CommonPrefixes []string   `json:"commonPrefixes"`
}

// ListItem is a single entry returned by file listing.
type ListItem struct {
	Key      string `json:"key"`      // object name
	Hash     string `json:"hash"`     // content hash
	Fsize    int64  `json:"fsize"`    // content size in bytes
	PutTime  int64  `json:"putTime"`  // upload time, in units of 100 ns
	MimeType string `json:"mimeType"` // MIME type of the content
	Type     int    `json:"type"`     // storage class code
	EndUser  string `json:"endUser"`  // endUser value reported by the service
}
// IsEmpty reports whether the entry is a blank record; the listing API may
// occasionally return such records.
func (l *ListItem) IsEmpty() (empty bool) {
	nonEmpty := l.Key != "" || l.Hash != "" || l.Fsize != 0 || l.PutTime != 0
	return !nonEmpty
}
// String renders the entry's fields one per line for debugging output.
func (l *ListItem) String() string {
	return fmt.Sprintf(
		"Hash: %s\nFsize: %d\nPutTime: %d\nMimeType: %s\nType: %d\nEndUser: %s\n",
		l.Hash, l.Fsize, l.PutTime, l.MimeType, l.Type, l.EndUser)
}
// callChan issues the HTTP request and, on a 2xx response, hands the body to
// callRetChan which streams the decoded records through a channel.
func callChan(r *client.Client, ctx context.Context, method, reqUrl string, headers http.Header) (chan listFilesRet2, error) {
	resp, err := r.DoRequestWith(ctx, method, reqUrl, headers, nil, 0)
	if err != nil {
		return nil, err
	}
	// Any non-2xx status is converted into a client error; the body is owned
	// by ResponseError in that case.
	if resp.StatusCode/100 != 2 {
		return nil, client.ResponseError(resp)
	}
	return callRetChan(ctx, resp)
}
// callRetChan converts the streamed JSON body of resp into a channel of
// listFilesRet2 records. The channel is closed when the body is exhausted,
// a decode error occurs, or ctx is cancelled; the body is always closed.
func callRetChan(ctx context.Context, resp *http.Response) (retCh chan listFilesRet2, err error) {
	if resp.StatusCode/100 != 2 {
		return nil, client.ResponseError(resp)
	}
	retCh = make(chan listFilesRet2)
	go func() {
		defer resp.Body.Close()
		defer close(retCh)
		dec := json.NewDecoder(resp.Body)
		for {
			// Fresh value each iteration so fields absent from one record do
			// not carry over from the previous one.
			var ret listFilesRet2
			// Use a goroutine-local error: the original assigned to the
			// function's named return value after the function had already
			// returned, which is a data race.
			if dErr := dec.Decode(&ret); dErr != nil {
				if dErr != io.EOF {
					fmt.Fprintf(os.Stderr, "decode error: %v\n", dErr)
				}
				return
			}
			select {
			case <-ctx.Done():
				return
			case retCh <- ret:
			}
		}
	}()
	return retCh, nil
}

@ -0,0 +1,244 @@
package storage
import (
"context"
"errors"
"fmt"
"github.com/qiniu/go-sdk/v7/auth"
"net/url"
"strconv"
)
// ListItem is a single entry returned by file listing.
type ListItem struct {
	// Key is the object name.
	Key string `json:"key"`
	// PutTime is the upload time in units of 100 ns; dropping the low seven
	// decimal digits yields a Unix timestamp in seconds.
	PutTime int64 `json:"putTime"`
	// Hash is the object's content hash as computed by the service.
	Hash string `json:"hash"`
	// Fsize is the content size in bytes.
	Fsize int64 `json:"fsize"`
	// MimeType is the MIME type of the content.
	MimeType string `json:"mimeType"`
	/**
	 * EndUser is the endUser value reported by the service.
	 */
	EndUser string `json:"endUser"`
	/**
	 * Type is the storage class code (values 0-3; presumably
	 * standard/IA/archive/deep-archive — confirm against the qiniu API docs).
	 * 0
	 * 1
	 * 2
	 * 3
	 */
	Type int `json:"type"`
	/**
	 * Status is the object status code (0 or 1; exact semantics not shown
	 * here — confirm against the qiniu API docs).
	 * 0
	 * 1
	 */
	Status int `json:"status"`
	/**
	 * Md5 is the MD5 checksum of the content.
	 */
	Md5 string `json:"md5"`
}
// IsEmpty reports whether the entry is a blank record (the listing API may
// return such records); a nil receiver also counts as empty.
func (l *ListItem) IsEmpty() (empty bool) {
	if l == nil {
		return true
	}
	nonEmpty := l.Key != "" || l.Hash != "" || l.Fsize != 0 || l.PutTime != 0
	return !nonEmpty
}
// String renders the entry's fields one per line for debugging output.
func (l *ListItem) String() string {
	return fmt.Sprintf(
		"Hash: %s\nFsize: %d\nPutTime: %d\nMimeType: %s\nType: %d\nEndUser: %s\n",
		l.Hash, l.Fsize, l.PutTime, l.MimeType, l.Type, l.EndUser)
}
// ListFilesRet is the response body of a /list request.
type ListFilesRet struct {
	// Marker is the position to resume listing from; empty when done.
	Marker string `json:"marker"`
	// Items are the listed entries.
	Items []ListItem `json:"items"`
	// CommonPrefixes are the directory-like prefixes grouped by delimiter.
	CommonPrefixes []string `json:"commonPrefixes"`
}
// ListFiles lists entries of a bucket filtered by a key prefix and a
// directory delimiter, resuming from marker, with at most limit entries per
// call (limit must be within [1, 1000]). It is a thin wrapper over
// ListFilesWithContext using a background context.
func (m *BucketManager) ListFiles(bucket, prefix, delimiter, marker string,
	limit int) (entries []ListItem, commonPrefixes []string, nextMarker string, hasNext bool, err error) {
	opts := []ListInputOption{
		ListInputOptionsPrefix(prefix),
		ListInputOptionsDelimiter(delimiter),
		ListInputOptionsMarker(marker),
		ListInputOptionsLimit(limit),
	}
	ret, more, lErr := m.ListFilesWithContext(context.Background(), bucket, opts...)
	if lErr != nil {
		return nil, nil, "", false, lErr
	}
	return ret.Items, ret.CommonPrefixes, ret.Marker, more, nil
}
// listInputOptions collects the optional listing conditions assembled by
// the ListInputOption setters below.
type listInputOptions struct {
	prefix    string // only list keys beginning with this prefix
	delimiter string // directory separator used to group common prefixes
	marker    string // position to resume listing from
	limit     int    // max entries per call; ListFilesWithContext requires [1, 1000]
}

// ListInputOption mutates one field of listInputOptions (functional option).
type ListInputOption func(options *listInputOptions)

// ListInputOptionsPrefix restricts listing to keys with the given prefix.
func ListInputOptionsPrefix(prefix string) ListInputOption {
	return func(input *listInputOptions) {
		input.prefix = prefix
	}
}

// ListInputOptionsDelimiter sets the directory delimiter used for grouping.
func ListInputOptionsDelimiter(delimiter string) ListInputOption {
	return func(input *listInputOptions) {
		input.delimiter = delimiter
	}
}

// ListInputOptionsMarker sets the marker to resume listing from.
func ListInputOptionsMarker(marker string) ListInputOption {
	return func(input *listInputOptions) {
		input.marker = marker
	}
}

// ListInputOptionsLimit sets the page size (must be within [1, 1000]).
func ListInputOptionsLimit(limit int) ListInputOption {
	return func(input *listInputOptions) {
		input.limit = limit
	}
}
//
// ListFilesWithContext
// @Description: lists objects of a bucket according to the optional listing
// conditions below.
// @receiver m BucketManager
// @param ctx request context
// @param bucket the bucket to list (must be non-empty)
// @param options optional listing conditions:
//	key prefix:          ListInputOptionsPrefix(prefix)
//	directory delimiter: ListInputOptionsDelimiter(delimiter)
//	resume position:     ListInputOptionsMarker(marker)
//	page size:           ListInputOptionsLimit(limit), range [1, 1000];
//	                     note the limit option is effectively mandatory —
//	                     the zero default is rejected by the range check.
// @return ret the listed page of objects
// @return hasNext whether more entries remain (ret.Marker non-empty)
// @return err listing error, if any
//
func (m *BucketManager) ListFilesWithContext(ctx context.Context, bucket string, options ...ListInputOption) (ret *ListFilesRet, hasNext bool, err error) {
	if len(bucket) == 0 {
		return nil, false, errors.New("bucket can't empty")
	}

	// Apply the functional options onto a zero-valued option set.
	inputOptions := listInputOptions{}
	for _, option := range options {
		option(&inputOptions)
	}
	if inputOptions.limit <= 0 || inputOptions.limit > 1000 {
		return nil, false, errors.New("invalid list limit, only allow [1, 1000]")
	}

	ctx = auth.WithCredentialsType(ctx, m.Mac, auth.TokenQiniu)
	host, reqErr := m.RsfReqHost(bucket)
	if reqErr != nil {
		return nil, false, reqErr
	}

	ret = &ListFilesRet{}
	reqURL := fmt.Sprintf("%s%s", host, uriListFiles(bucket, inputOptions.prefix, inputOptions.delimiter, inputOptions.marker, inputOptions.limit))
	err = m.Client.CredentialedCall(ctx, m.Mac, auth.TokenQiniu, ret, "POST", reqURL, nil)
	if err != nil {
		return nil, false, err
	}
	// A non-empty marker means the listing can be resumed for another page.
	return ret, len(ret.Marker) > 0, nil
}
// listFilesRet2 is one streamed listing record: a file entry (Item) or a
// directory prefix (Dir), tagged with the page marker it came from.
type listFilesRet2 struct {
	Marker string   `json:"marker"`
	Item   ListItem `json:"item"`
	Dir    string   `json:"dir"`
}

// ListBucket lists bucket entries filtered by a key prefix and a directory
// delimiter, streaming each record over the returned channel; it delegates
// to ListBucketContext with a background context.
func (m *BucketManager) ListBucket(bucket, prefix, delimiter, marker string) (retCh chan listFilesRet2, err error) {
	return m.ListBucketContext(context.Background(), bucket, prefix, delimiter, marker)
}
// ListBucketContext lists bucket entries filtered by a key prefix and a
// directory delimiter, starting at marker, delivering each record through
// the returned channel; ctx can cancel the underlying request.
// NOTE(review): this implementation fetches a single page of at most 250
// entries via ListFilesWithContext and closes the channel after buffering
// them — it does not stream the whole bucket; confirm this matches callers'
// expectations of the previous streaming behavior.
func (m *BucketManager) ListBucketContext(ctx context.Context, bucket, prefix, delimiter, marker string) (retCh chan listFilesRet2, err error) {
	ret, _, lErr := m.ListFilesWithContext(ctx, bucket,
		ListInputOptionsLimit(250),
		ListInputOptionsPrefix(prefix),
		ListInputOptionsDelimiter(delimiter),
		ListInputOptionsMarker(marker))
	if lErr != nil {
		return nil, lErr
	}

	// Buffer capacity covers every record, so the sends below never block;
	// the deferred close runs before this function returns.
	count := len(ret.CommonPrefixes) + len(ret.Items)
	retCh = make(chan listFilesRet2, count)
	defer close(retCh)

	if len(ret.CommonPrefixes) > 0 {
		for _, commonPrefix := range ret.CommonPrefixes {
			retCh <- listFilesRet2{
				Marker: ret.Marker,
				Item:   ListItem{},
				Dir:    commonPrefix,
			}
		}
	}

	if len(ret.Items) > 0 {
		for _, item := range ret.Items {
			retCh <- listFilesRet2{
				Marker: ret.Marker,
				Item:   item,
				Dir:    "",
			}
		}
	}

	return retCh, err
}
// uriListFiles builds the /list request path with its query string; empty
// string options and non-positive limits are simply omitted.
func uriListFiles(bucket, prefix, delimiter, marker string, limit int) string {
	query := url.Values{"bucket": []string{bucket}}
	if prefix != "" {
		query.Set("prefix", prefix)
	}
	if delimiter != "" {
		query.Set("delimiter", delimiter)
	}
	if marker != "" {
		query.Set("marker", marker)
	}
	if limit > 0 {
		query.Set("limit", strconv.Itoa(limit))
	}
	return "/list?" + query.Encode()
}

@ -127,10 +127,6 @@ func (p *FormUploader) PutFileWithoutKey(
func (p *FormUploader) putFile(
ctx context.Context, ret interface{}, upToken string,
key string, hasKey bool, localFile string, extra *PutExtra) (err error) {
if extra == nil {
extra = &PutExtra{}
}
extra.init()
f, err := os.Open(localFile)
if err != nil {
@ -183,6 +179,11 @@ func (p *FormUploader) put(
ctx context.Context, ret interface{}, upToken string,
key string, hasKey bool, data io.Reader, size int64, extra *PutExtra, fileName string) error {
if extra == nil {
extra = &PutExtra{}
}
extra.init()
seekableData, ok := data.(io.ReadSeeker)
if !ok {
dataBytes, rErr := ioutil.ReadAll(data)

@ -187,12 +187,7 @@ func (m *OperationManager) ApiHost(bucket string) (apiHost string, err error) {
}
}
scheme := "http://"
if m.Cfg.UseHTTPS {
scheme = "https://"
}
apiHost = fmt.Sprintf("%s%s", scheme, zone.ApiHost)
apiHost = endpoint(m.Cfg.UseHTTPS, zone.ApiHost)
return
}
@ -201,12 +196,6 @@ func (m *OperationManager) PrefopApiHost(persistentID string) (apiHost string) {
if m.Cfg.Zone != nil {
apiHost = m.Cfg.Zone.ApiHost
}
if m.Cfg.UseHTTPS {
apiHost = fmt.Sprintf("https://%s", apiHost)
} else {
apiHost = fmt.Sprintf("http://%s", apiHost)
}
apiHost = endpoint(m.Cfg.UseHTTPS, apiHost)
return
}

@ -27,6 +27,9 @@ type Region struct {
// 存储io 入口
IovipHost string `json:"io,omitempty"`
// 源站下载入口
IoSrcHost string `json:"io_src,omitempty"`
}
type RegionID string

@ -16,13 +16,14 @@ import (
// UcQueryRet 为查询请求的回复
type UcQueryRet struct {
TTL int `json:"ttl"`
Io map[string]map[string][]string `json:"-"`
IoInfo map[string]UcQueryIo `json:"io"`
Up map[string]UcQueryUp `json:"up"`
RsInfo map[string]UcQueryServerInfo `json:"rs"`
RsfInfo map[string]UcQueryServerInfo `json:"rsf"`
ApiInfo map[string]UcQueryServerInfo `json:"api"`
TTL int `json:"ttl"`
Io map[string]map[string][]string `json:"-"`
IoInfo map[string]UcQueryIo `json:"io"`
IoSrcInfo map[string]UcQueryIo `json:"io_src"`
Up map[string]UcQueryUp `json:"up"`
RsInfo map[string]UcQueryServerInfo `json:"rs"`
RsfInfo map[string]UcQueryServerInfo `json:"rsf"`
ApiInfo map[string]UcQueryServerInfo `json:"api"`
}
type tmpUcQueryRet UcQueryRet
@ -35,6 +36,7 @@ func (uc *UcQueryRet) UnmarshalJSON(data []byte) error {
uc.TTL = tmp.TTL
uc.IoInfo = tmp.IoInfo
uc.IoSrcInfo = tmp.IoSrcInfo
uc.Up = tmp.Up
uc.RsInfo = tmp.RsInfo
uc.RsfInfo = tmp.RsfInfo
@ -105,7 +107,7 @@ type regionV2CacheValue struct {
type regionV2CacheMap map[string]regionV2CacheValue
const regionV2CacheFileName = "query.cache.json"
const regionV2CacheFileName = "query_v2_00.cache.json"
var (
regionV2CachePath = filepath.Join(os.TempDir(), "qiniu-golang-sdk", regionV2CacheFileName)
@ -204,6 +206,11 @@ func getRegionByV2(ak, bucket string) (*Region, error) {
return nil, fmt.Errorf("empty io host list")
}
ioSrcHost := ret.getOneHostFromInfo(ret.IoSrcInfo)
if len(ioHost) == 0 {
return nil, fmt.Errorf("empty io host list")
}
rsHost := ret.getOneHostFromInfo(ret.RsInfo)
if len(rsHost) == 0 {
return nil, fmt.Errorf("empty rs host list")
@ -235,6 +242,7 @@ func getRegionByV2(ak, bucket string) (*Region, error) {
RsHost: rsHost,
RsfHost: rsfHost,
ApiHost: apiHost,
IoSrcHost: ioSrcHost,
}
regionV2Cache.Store(regionID, regionV2CacheValue{

@ -6,6 +6,7 @@ import (
"fmt"
"github.com/qiniu/go-sdk/v7/client"
"golang.org/x/sync/singleflight"
"math"
"os"
"path/filepath"
"sync"
@ -20,6 +21,7 @@ type ucQueryV4Region struct {
RegionId string `json:"region"`
TTL int `json:"ttl"`
Io ucQueryV4Server `json:"io"`
IoSrc ucQueryV4Server `json:"io_src"`
Up ucQueryV4Server `json:"up"`
Rs ucQueryV4Server `json:"rs"`
Rsf ucQueryV4Server `json:"rsf"`
@ -47,9 +49,17 @@ type regionV4CacheValue struct {
Regions []*Region `json:"regions"`
Deadline time.Time `json:"deadline"`
}
func (r *regionV4CacheValue) getRegions() []*Region {
if r == nil {
return nil
}
return r.Regions
}
type regionV4CacheMap map[string]regionV4CacheValue
const regionV4CacheFileName = "query_v4.cache.json"
const regionV4CacheFileName = "query_v4_00.cache.json"
var (
regionV4CachePath = filepath.Join(os.TempDir(), "qiniu-golang-sdk", regionV4CacheFileName)
@ -130,10 +140,11 @@ func getRegionByV4(ak, bucket string) (*RegionGroup, error) {
regionID := fmt.Sprintf("%s:%s", ak, bucket)
//check from cache
if v, ok := regionV4Cache.Load(regionID); ok && time.Now().Before(v.(regionV4CacheValue).Deadline) {
return NewRegionGroup(v.(regionV4CacheValue).Regions...), nil
cacheValue, _ := v.(regionV4CacheValue)
return NewRegionGroup(cacheValue.getRegions()...), nil
}
newRegion, err, _ := ucQueryV2Group.Do(regionID, func() (interface{}, error) {
newRegion, err, _ := ucQueryV4Group.Do(regionID, func() (interface{}, error) {
reqURL := fmt.Sprintf("%s/v4/query?ak=%s&bucket=%s", getUcHostByDefaultProtocol(), ak, bucket)
var ret ucQueryV4Ret
@ -142,10 +153,12 @@ func getRegionByV4(ak, bucket string) (*RegionGroup, error) {
return nil, fmt.Errorf("query region error, %s", err.Error())
}
ttl := 0
ttl := math.MaxInt32
regions := make([]*Region, 0, 0)
for _, host := range ret.Hosts {
ttl = host.TTL
if ttl > host.TTL {
ttl = host.TTL
}
regions = append(regions, &Region{
SrcUpHosts: host.Up.Domains,
CdnUpHosts: host.Up.Domains,
@ -153,6 +166,7 @@ func getRegionByV4(ak, bucket string) (*RegionGroup, error) {
RsfHost: host.Rsf.getOneServer(),
ApiHost: host.Api.getOneServer(),
IovipHost: host.Io.getOneServer(),
IoSrcHost: host.IoSrc.getOneServer(),
})
}

@ -178,7 +178,7 @@ func hostAddSchemeIfNeeded(useHttps bool, host string) string {
} else if strings.Contains(host, "://") {
return host
} else {
return endpoint(true, host)
return endpoint(useHttps, host)
}
}

@ -467,26 +467,18 @@ type BucketQuota struct {
// SetBucketQuota sets the bucket quota: a cap on the total stored bytes
// (size) and on the number of objects (count).
// Fix: diff residue kept both the old z0ApiHost resolution and the new
// getUcHost code path; only the new one remains.
func (m *BucketManager) SetBucketQuota(bucket string, size, count int64) (err error) {
	host := getUcHost(m.Cfg.UseHTTPS)
	host = strings.TrimRight(host, "/")
	reqURL := fmt.Sprintf("%s/setbucketquota/%s/size/%d/count/%d", host, bucket, size, count)
	err = m.Client.CredentialedCall(context.Background(), m.Mac, auth.TokenQiniu, nil, "POST", reqURL, nil)
	return
}
// GetBucketQuota fetches the bucket's quota settings.
// Fix: diff residue kept both the old z0ApiHost resolution and the new
// getUcHost code path; only the new one remains.
func (m *BucketManager) GetBucketQuota(bucket string) (quota BucketQuota, err error) {
	host := getUcHost(m.Cfg.UseHTTPS)
	host = strings.TrimRight(host, "/")
	reqURL := host + "/getbucketquota/" + bucket
	err = m.Client.CredentialedCall(context.Background(), m.Mac, auth.TokenQiniu, &quota, "POST", reqURL, nil)
	return
}

@ -191,7 +191,6 @@ func (manager *UploadManager) putRetryBetweenRegion(ctx context.Context, ret int
}
manager.cfg.Regions = regions
}
regions := manager.cfg.Regions.clone()
resumeVersion := "v2"

@ -1,136 +0,0 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package semaphore provides a weighted semaphore implementation.
package semaphore // import "golang.org/x/sync/semaphore"
import (
"container/list"
"context"
"sync"
)
// waiter is one queued Acquire call; ready is closed by a releaser once the
// waiter's weight has been granted.
type waiter struct {
	n     int64
	ready chan<- struct{} // Closed when semaphore acquired.
}

// NewWeighted creates a new weighted semaphore with the given
// maximum combined weight for concurrent access.
func NewWeighted(n int64) *Weighted {
	w := &Weighted{size: n}
	return w
}

// Weighted provides a way to bound concurrent access to a resource.
// The callers can request access with a given weight.
type Weighted struct {
	size    int64      // maximum combined weight
	cur     int64      // weight currently held
	mu      sync.Mutex // guards cur and waiters
	waiters list.List  // FIFO queue of waiter values (blocked Acquire calls)
}
// Acquire acquires the semaphore with a weight of n, blocking until resources
// are available or ctx is done. On success, returns nil. On failure, returns
// ctx.Err() and leaves the semaphore unchanged.
//
// If ctx is already done, Acquire may still succeed without blocking.
func (s *Weighted) Acquire(ctx context.Context, n int64) error {
	s.mu.Lock()
	// Fast path: enough free weight and no waiter queued ahead of us
	// (checking the queue preserves FIFO fairness).
	if s.size-s.cur >= n && s.waiters.Len() == 0 {
		s.cur += n
		s.mu.Unlock()
		return nil
	}
	if n > s.size {
		// Don't make other Acquire calls block on one that's doomed to fail.
		s.mu.Unlock()
		<-ctx.Done()
		return ctx.Err()
	}
	// Slow path: enqueue ourselves and wait for a releaser to close ready.
	ready := make(chan struct{})
	w := waiter{n: n, ready: ready}
	elem := s.waiters.PushBack(w)
	s.mu.Unlock()
	select {
	case <-ctx.Done():
		err := ctx.Err()
		s.mu.Lock()
		select {
		case <-ready:
			// Acquired the semaphore after we were canceled. Rather than trying to
			// fix up the queue, just pretend we didn't notice the cancelation.
			err = nil
		default:
			isFront := s.waiters.Front() == elem
			s.waiters.Remove(elem)
			// If we're at the front and there're extra tokens left, notify other waiters.
			if isFront && s.size > s.cur {
				s.notifyWaiters()
			}
		}
		s.mu.Unlock()
		return err
	case <-ready:
		return nil
	}
}
// TryAcquire acquires the semaphore with a weight of n without blocking.
// On success, returns true. On failure, returns false and leaves the
// semaphore unchanged. Queued waiters always take priority, so it fails
// whenever anyone is blocked in Acquire.
func (s *Weighted) TryAcquire(n int64) bool {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.size-s.cur < n || s.waiters.Len() != 0 {
		return false
	}
	s.cur += n
	return true
}
// Release releases the semaphore with a weight of n.
// Releasing more than is currently held is a caller bug and panics.
func (s *Weighted) Release(n int64) {
	s.mu.Lock()
	s.cur -= n
	if s.cur < 0 {
		// Unlock before panicking so a recovered panic does not leave the
		// semaphore permanently locked.
		s.mu.Unlock()
		panic("semaphore: released more than held")
	}
	// Wake queued waiters that now fit within the freed weight.
	s.notifyWaiters()
	s.mu.Unlock()
}
// notifyWaiters grants weight to queued waiters in FIFO order while enough
// free weight remains; it must be called with s.mu held.
func (s *Weighted) notifyWaiters() {
	for {
		next := s.waiters.Front()
		if next == nil {
			break // No more waiters blocked.
		}
		w := next.Value.(waiter)
		if s.size-s.cur < w.n {
			// Not enough tokens for the next waiter. We could keep going (to try to
			// find a waiter with a smaller request), but under load that could cause
			// starvation for large requests; instead, we leave all remaining waiters
			// blocked.
			//
			// Consider a semaphore used as a read-write lock, with N tokens, N
			// readers, and one writer. Each reader can Acquire(1) to obtain a read
			// lock. The writer can Acquire(N) to obtain a write lock, excluding all
			// of the readers. If we allow the readers to jump ahead in the queue,
			// the writer will starve — there is always one token available for every
			// reader.
			break
		}
		s.cur += w.n
		s.waiters.Remove(next)
		close(w.ready)
	}
}

2
vendor/gorm.io/gen/condition.go generated vendored

@ -27,7 +27,7 @@ func exprToCondition(exprs ...clause.Expression) []Condition {
conds := make([]Condition, 0, len(exprs))
for _, e := range exprs {
switch e := e.(type) {
case *datatypes.JSONQueryExpression, *datatypes.JSONOverlapsExpression:
case *datatypes.JSONQueryExpression, *datatypes.JSONOverlapsExpression, *datatypes.JSONArrayExpression:
conds = append(conds, &condContainer{value: e})
default:
conds = append(conds, &condContainer{err: fmt.Errorf("unsupported Expression %T to converted to Condition", e)})

19
vendor/gorm.io/gen/config.go generated vendored

@ -2,6 +2,7 @@ package gen
import (
"fmt"
"os"
"path/filepath"
"strings"
@ -53,9 +54,8 @@ type Config struct {
modelNameNS func(tableName string) (modelName string)
fileNameNS func(tableName string) (fileName string)
dataTypeMap map[string]func(detailType string) (dataType string)
dataTypeMap map[string]func(columnType gorm.ColumnType) (dataType string)
fieldJSONTagNS func(columnName string) (tagContent string)
fieldNewTagNS func(columnName string) (tagContent string)
modelOpts []ModelOpt
}
@ -94,7 +94,7 @@ func (cfg *Config) WithFileNameStrategy(ns func(tableName string) (fileName stri
}
// WithDataTypeMap specify data type mapping relationship, only work when syncing table from db
func (cfg *Config) WithDataTypeMap(newMap map[string]func(detailType string) (dataType string)) {
func (cfg *Config) WithDataTypeMap(newMap map[string]func(columnType gorm.ColumnType) (dataType string)) {
cfg.dataTypeMap = newMap
}
@ -103,11 +103,6 @@ func (cfg *Config) WithJSONTagNameStrategy(ns func(columnName string) (tagConten
cfg.fieldJSONTagNS = ns
}
// WithNewTagNameStrategy specify new tag naming strategy
func (cfg *Config) WithNewTagNameStrategy(ns func(columnName string) (tagContent string)) {
cfg.fieldNewTagNS = ns
}
// WithImportPkgPath specify import package path
func (cfg *Config) WithImportPkgPath(paths ...string) {
for i, path := range paths {
@ -131,12 +126,12 @@ func (cfg *Config) Revise() (err error) {
return fmt.Errorf("outpath is invalid: %w", err)
}
if cfg.OutPath == "" {
cfg.OutPath = "./query/"
cfg.OutPath = fmt.Sprintf(".%squery%s", string(os.PathSeparator), string(os.PathSeparator))
}
if cfg.OutFile == "" {
cfg.OutFile = cfg.OutPath + "/gen.go"
} else if !strings.Contains(cfg.OutFile, "/") {
cfg.OutFile = cfg.OutPath + "/" + cfg.OutFile
cfg.OutFile = filepath.Join(cfg.OutPath, "gen.go")
} else if !strings.Contains(cfg.OutFile, string(os.PathSeparator)) {
cfg.OutFile = filepath.Join(cfg.OutPath, cfg.OutFile)
}
cfg.queryPkgName = filepath.Base(cfg.OutPath)

@ -219,9 +219,9 @@ type RelateConfig struct {
RelateSlicePointer bool
JSONTag string
GORMTag string
NewTag string
OverwriteTag string
GORMTag GormTag
Tag Tag
OverwriteTag Tag
}
// RelateFieldPrefix return generated relation field's type
@ -237,3 +237,19 @@ func (c *RelateConfig) RelateFieldPrefix(relationshipType RelationshipType) stri
return defaultRelationshipPrefix[relationshipType]
}
}
// GetTag returns the struct tag to apply to the generated relation field
// named fieldName. OverwriteTag, when set, wins outright; otherwise the
// config's Tag is lazily created and its json key is defaulted via the
// package naming strategy ns.
// NOTE: this method mutates the receiver (c.Tag, c.JSONTag) as a cache;
// a nil receiver yields a fresh empty tag.
func (c *RelateConfig) GetTag(fieldName string) Tag {
	if c == nil {
		return NewTag()
	}
	if c.OverwriteTag != nil {
		return c.OverwriteTag
	}
	if c.Tag == nil {
		c.Tag = NewTag()
	}
	if c.JSONTag == "" {
		// Default the json tag from the field name using ns.ColumnName —
		// presumably the column-style (snake_case) form; confirm in gorm/gen.
		c.JSONTag = ns.ColumnName("", fieldName)
	}
	c.Tag.Set(TagKeyJson, c.JSONTag)
	return c.Tag
}

@ -32,6 +32,11 @@ func NewField(table, column string, opts ...Option) Field {
return Field{expr: expr{col: toColumn(table, column, opts...)}}
}
// NewSerializer creates a new Serializer field for the given table column.
func NewSerializer(table, column string, opts ...Option) Serializer {
	col := toColumn(table, column, opts...)
	return Serializer{expr: expr{col: col}}
}
// NewAsterisk create new * field
func NewAsterisk(table string, opts ...Option) Asterisk {
return Asterisk{asteriskExpr: asteriskExpr{expr{col: toColumn(table, "*", opts...)}}}

16
vendor/gorm.io/gen/field/expr.go generated vendored

@ -1,6 +1,8 @@
package field
import (
"fmt"
"strings"
"time"
"gorm.io/gorm"
@ -32,6 +34,7 @@ type Expr interface {
SubCol(col Expr) Expr
MulCol(col Expr) Expr
DivCol(col Expr) Expr
ConcatCol(cols ...Expr) Expr
// implement Condition
BeCond() interface{}
@ -241,6 +244,19 @@ func (e expr) DivCol(col Expr) Expr {
return Field{e.setE(clause.Expr{SQL: "(?) / (?)", Vars: []interface{}{e.RawExpr(), col.RawExpr()}})}
}
// ConcatCol builds a SQL Concat(...) expression over this expression
// followed by each of cols.
func (e expr) ConcatCol(cols ...Expr) Expr {
	vars := make([]interface{}, 0, len(cols)+1)
	vars = append(vars, e.RawExpr())
	for _, col := range cols {
		vars = append(vars, col.RawExpr())
	}
	placeholders := make([]string, len(vars))
	for i := range placeholders {
		placeholders[i] = "?"
	}
	return Field{e.setE(clause.Expr{
		SQL:  fmt.Sprintf("Concat(%s)", strings.Join(placeholders, ",")),
		Vars: vars,
	})}
}
// ======================== keyword ========================
func (e expr) As(alias string) Expr {
if e.e != nil {

@ -0,0 +1,90 @@
package field
import (
"context"
"gorm.io/gorm/clause"
"gorm.io/gorm/schema"
"reflect"
"gorm.io/gorm"
)
// ValuerType pairs a serializer-backed value with the column it targets and
// the name of the comparison that produced it, so GormValue can re-serialize
// it at SQL-generation time.
type ValuerType struct {
	Column string
	Value  schema.SerializerValuerInterface
	// FucName is the comparison name, e.g. "Eq" (sic — field name kept as-is
	// for compatibility with generated code).
	FucName string
}

// GormValue implements gorm's valuer hook: it serializes v.Value for the
// schema field looked up by column name and embeds the result as a single
// bind variable. Any serialization error is recorded on db via AddError.
// NOTE(review): the context key is a bare string ("func_name"); go vet
// recommends a dedicated key type — confirm downstream consumers before
// changing it.
func (v ValuerType) GormValue(ctx context.Context, db *gorm.DB) (expr clause.Expr) {
	stmt := db.Statement.Schema
	field := stmt.LookUpField(v.Column)
	newValue, err := v.Value.Value(context.WithValue(ctx, "func_name", v.FucName), field, reflect.ValueOf(v.Value), v.Value)
	db.AddError(err)
	return clause.Expr{SQL: "?", Vars: []interface{}{newValue}}
}

// Serializer is a field whose values pass through a gorm serializer before
// being compared or assigned.
type Serializer struct{ expr }
// Eq builds an equality comparison against the serialized value.
func (field Serializer) Eq(value schema.SerializerValuerInterface) Expr {
	return expr{e: clause.Eq{Column: field.RawExpr(), Value: ValuerType{Column: field.ColumnName().String(), Value: value, FucName: "Eq"}}}
}

// Neq builds an inequality comparison against the serialized value.
func (field Serializer) Neq(value schema.SerializerValuerInterface) Expr {
	return expr{e: clause.Neq{Column: field.RawExpr(), Value: ValuerType{Column: field.ColumnName().String(), Value: value, FucName: "Neq"}}}
}

// In matches rows whose column equals any of the serialized values.
func (field Serializer) In(values ...schema.SerializerValuerInterface) Expr {
	return expr{e: clause.IN{Column: field.RawExpr(), Values: field.toSlice(values...)}}
}

// Gt builds a "greater than" comparison against the serialized value.
func (field Serializer) Gt(value schema.SerializerValuerInterface) Expr {
	return expr{e: clause.Gt{Column: field.RawExpr(), Value: ValuerType{Column: field.ColumnName().String(), Value: value, FucName: "Gt"}}}
}

// Gte builds a "greater than or equal" comparison against the serialized value.
func (field Serializer) Gte(value schema.SerializerValuerInterface) Expr {
	return expr{e: clause.Gte{Column: field.RawExpr(), Value: ValuerType{Column: field.ColumnName().String(), Value: value, FucName: "Gte"}}}
}

// Lt builds a "less than" comparison against the serialized value.
func (field Serializer) Lt(value schema.SerializerValuerInterface) Expr {
	return expr{e: clause.Lt{Column: field.RawExpr(), Value: ValuerType{Column: field.ColumnName().String(), Value: value, FucName: "Lt"}}}
}

// Lte builds a "less than or equal" comparison against the serialized value.
func (field Serializer) Lte(value schema.SerializerValuerInterface) Expr {
	return expr{e: clause.Lte{Column: field.RawExpr(), Value: ValuerType{Column: field.ColumnName().String(), Value: value, FucName: "Lte"}}}
}

// Like builds a LIKE comparison against the serialized value.
func (field Serializer) Like(value schema.SerializerValuerInterface) Expr {
	return expr{e: clause.Like{Column: field.RawExpr(), Value: ValuerType{Column: field.ColumnName().String(), Value: value, FucName: "Like"}}}
}

// Value builds an assignment of the serialized value (for SET clauses).
func (field Serializer) Value(value schema.SerializerValuerInterface) AssignExpr {
	return field.value(ValuerType{Column: field.ColumnName().String(), Value: value, FucName: "Value"})
}

// Sum aggregates the column via the underlying expr's sum helper.
func (field Serializer) Sum() Field {
	return Field{field.sum()}
}

// IfNull substitutes the serialized fallback value when the column is NULL,
// via the underlying expr's ifNull helper.
func (field Serializer) IfNull(value schema.SerializerValuerInterface) Expr {
	return field.ifNull(ValuerType{Column: field.ColumnName().String(), Value: value, FucName: "IfNull"})
}

// toSlice wraps each value in a ValuerType so it gets serialized when the
// IN clause is rendered.
func (field Serializer) toSlice(values ...schema.SerializerValuerInterface) []interface{} {
	slice := make([]interface{}, len(values))
	for i, v := range values {
		slice[i] = ValuerType{Column: field.ColumnName().String(), Value: v, FucName: "In"}
	}
	return slice
}

@ -1,6 +1,8 @@
package field
import (
"fmt"
"gorm.io/gorm/clause"
)
@ -121,6 +123,15 @@ func (field String) Concat(before, after string) String {
}
}
// SubstringIndex SUBSTRING_INDEX
// https://dev.mysql.com/doc/refman/8.0/en/functions.html#function_substring-index
// NOTE(review): delim is interpolated with %q, i.e. as a Go-style
// double-quoted literal inside the SQL text; this relies on MySQL accepting
// double-quoted strings (the default, but not under ANSI_QUOTES) and on Go
// escaping matching SQL escaping for the given delimiter — confirm upstream.
func (field String) SubstringIndex(delim string, count int) String {
	return String{expr{e: clause.Expr{
		SQL:  fmt.Sprintf("SUBSTRING_INDEX(?,%q,%d)", delim, count),
		Vars: []interface{}{field.RawExpr()},
	}}}
}
func (field String) toSlice(values []string) []interface{} {
slice := make([]interface{}, len(values))
for i, v := range values {
@ -227,6 +238,15 @@ func (field Bytes) FindInSetWith(target string) Expr {
return expr{e: clause.Expr{SQL: "FIND_IN_SET(?,?)", Vars: []interface{}{target, field.RawExpr()}}}
}
// SubstringIndex SUBSTRING_INDEX
// https://dev.mysql.com/doc/refman/8.0/en/functions.html#function_substring-index
// NOTE(review): delim is interpolated with %q (Go-style double-quoted literal
// embedded in the SQL); this assumes MySQL double-quote string semantics —
// same caveat as the String variant, confirm upstream intent.
func (field Bytes) SubstringIndex(delim string, count int) Bytes {
	return Bytes{expr{e: clause.Expr{
		SQL:  fmt.Sprintf("SUBSTRING_INDEX(?,%q,%d)", delim, count),
		Vars: []interface{}{field.RawExpr()},
	}}}
}
func (field Bytes) toSlice(values [][]byte) []interface{} {
slice := make([]interface{}, len(values))
for i, v := range values {

129
vendor/gorm.io/gen/field/tag.go generated vendored

@ -0,0 +1,129 @@
package field
import (
"sort"
"strings"
)
// Struct-tag keys recognized by the generator.
const (
	TagKeyGorm = "gorm"
	TagKeyJson = "json"
	// gorm tag sub-keys (used inside the gorm:"..." tag value)
	TagKeyGormColumn        = "column"
	TagKeyGormType          = "type"
	TagKeyGormPrimaryKey    = "primaryKey"
	TagKeyGormAutoIncrement = "autoIncrement"
	TagKeyGormNotNull       = "not null"
	TagKeyGormUniqueIndex   = "uniqueIndex"
	TagKeyGormIndex         = "index"
	TagKeyGormDefault       = "default"
	TagKeyGormComment       = "comment"
)
var (
	// tagKeyPriorities controls emission order of tag keys: higher values
	// sort first; equal priorities fall back to alphabetical order
	// (see tagKeySort).
	tagKeyPriorities = map[string]int16{
		TagKeyGorm: 100,
		TagKeyJson: 99,

		TagKeyGormColumn:        10,
		TagKeyGormType:          9,
		TagKeyGormPrimaryKey:    8,
		TagKeyGormAutoIncrement: 7,
		TagKeyGormNotNull:       6,
		TagKeyGormUniqueIndex:   5,
		TagKeyGormIndex:         4,
		TagKeyGormDefault:       3,
		TagKeyGormComment:       0,
	}
)
// TagBuilder renders a set of tag entries into their string form.
type TagBuilder interface {
	Build() string
}
// Tag is a struct-tag set keyed by tag name (e.g. "gorm", "json").
type Tag map[string]string

// NewTag returns an empty, ready-to-use Tag.
func NewTag() Tag {
	return Tag{}
}

// Set stores value under key, overwriting any existing entry.
func (tag Tag) Set(key, value string) {
	tag[key] = value
}

// Remove deletes key from the tag set; no-op when the key is absent.
func (tag Tag) Remove(key string) {
	delete(tag, key)
}
// Build renders the tag set as a struct-tag string such as
// `gorm:"column:id" json:"id"`. Keys are emitted in priority order
// (see tagKeyPriorities); entries with an empty key or empty value are
// skipped. Returns "" for a nil or empty tag set.
func (tag Tag) Build() string {
	// len of a nil map is 0, so a single length check covers both the nil
	// and the empty case (the explicit `tag == nil ||` was redundant).
	if len(tag) == 0 {
		return ""
	}
	tags := make([]string, 0, len(tag))
	for _, k := range tagKeySort(tag) {
		v := tag[k]
		if k == "" || v == "" {
			continue
		}
		tags = append(tags, k+":\""+v+"\"")
	}
	return strings.Join(tags, " ")
}
// GormTag holds the sub-entries of a gorm:"..." tag (column, type, index, ...).
type GormTag Tag

// NewGormTag returns an empty, ready-to-use GormTag.
func NewGormTag() GormTag {
	return GormTag{}
}

// Set stores value under key, overwriting any existing entry.
func (tag GormTag) Set(key, value string) {
	tag[key] = value
}

// Remove deletes key from the tag set; no-op when the key is absent.
func (tag GormTag) Remove(key string) {
	delete(tag, key)
}
// Build renders the gorm tag value, joining entries with ';'. An entry with
// an empty value (e.g. primaryKey) is emitted as the bare key, and one with
// an empty key as the bare value; an entry empty on both sides is skipped.
// Entries follow tagKeyPriorities order. Returns "" for a nil or empty set.
func (tag GormTag) Build() string {
	// len of a nil map is 0, so a single length check covers both the nil
	// and the empty case (the explicit `tag == nil ||` was redundant).
	if len(tag) == 0 {
		return ""
	}
	tags := make([]string, 0, len(tag))
	for _, k := range tagKeySort(Tag(tag)) {
		v := tag[k]
		if k == "" && v == "" {
			continue
		}
		tv := make([]string, 0, 2)
		if k != "" {
			tv = append(tv, k)
		}
		if v != "" {
			tv = append(tv, v)
		}
		tags = append(tags, strings.Join(tv, ":"))
	}
	return strings.Join(tags, ";")
}
// tagKeySort returns tag's keys ordered by descending priority
// (tagKeyPriorities), breaking ties alphabetically so that generated tags
// are deterministic across runs (map iteration order is random).
func tagKeySort(tag Tag) []string {
	// A nil/empty map simply yields an empty slice; no early return needed.
	keys := make([]string, 0, len(tag))
	for k := range tag { // `for k, _ := range` is redundant; go vet flags it
		keys = append(keys, k)
	}
	sort.Slice(keys, func(i, j int) bool {
		pi, pj := tagKeyPriorities[keys[i]], tagKeyPriorities[keys[j]]
		if pi == pj {
			// Strict `<` (not `<=`): sort.Slice requires a strict ordering;
			// map keys are unique, so results are unchanged.
			return keys[i] < keys[j]
		}
		return pi > pj
	})
	return keys
}

6
vendor/gorm.io/gen/field/time.go generated vendored

@ -110,6 +110,10 @@ func (field Time) MonthName() String {
return String{expr{e: clause.Expr{SQL: "MONTHNAME(?)", Vars: []interface{}{field.RawExpr()}}}}
}
// Year equal to YEAR(self)
func (field Time) Year() Int {
	return Int{expr{e: clause.Expr{SQL: "YEAR(?)", Vars: []interface{}{field.RawExpr()}}}}
}
// Month equal to MONTH(self)
func (field Time) Month() Int {
return Int{expr{e: clause.Expr{SQL: "MONTH(?)", Vars: []interface{}{field.RawExpr()}}}}
@ -181,7 +185,7 @@ func (field Time) Sum() Time {
}
// IfNull ...
func (field Time) IfNull(value Time) Expr {
func (field Time) IfNull(value time.Time) Expr {
return field.ifNull(value)
}

@ -1,7 +1,6 @@
package gen
import (
"fmt"
"reflect"
"regexp"
"strings"
@ -19,12 +18,12 @@ var ns = schema.NamingStrategy{}
var (
// FieldNew add new field (any type your want)
FieldNew = func(fieldName, fieldType, fieldTag string) model.CreateFieldOpt {
FieldNew = func(fieldName, fieldType string, fieldTag field.Tag) model.CreateFieldOpt {
return func(*model.Field) *model.Field {
return &model.Field{
Name: fieldName,
Type: fieldType,
OverwriteTag: fieldTag,
Name: fieldName,
Type: fieldType,
Tag: fieldTag,
}
}
}
@ -112,10 +111,10 @@ var (
}
}
// FieldTag specify GORM tag and JSON tag
FieldTag = func(columnName string, gormTag, jsonTag string) model.ModifyFieldOpt {
FieldTag = func(columnName string, tagFunc func(tag field.Tag) field.Tag) model.ModifyFieldOpt {
return func(m *model.Field) *model.Field {
if m.ColumnName == columnName {
m.GORMTag, m.JSONTag = gormTag, jsonTag
m.Tag = tagFunc(m.Tag)
}
return m
}
@ -124,7 +123,7 @@ var (
FieldJSONTag = func(columnName string, jsonTag string) model.ModifyFieldOpt {
return func(m *model.Field) *model.Field {
if m.ColumnName == columnName {
m.JSONTag = jsonTag
m.Tag.Set(field.TagKeyJson, jsonTag)
}
return m
}
@ -133,25 +132,28 @@ var (
FieldJSONTagWithNS = func(schemaName func(columnName string) (tagContent string)) model.ModifyFieldOpt {
return func(m *model.Field) *model.Field {
if schemaName != nil {
m.JSONTag = schemaName(m.ColumnName)
m.Tag.Set(field.TagKeyJson, schemaName(m.ColumnName))
}
return m
}
}
// FieldGORMTag specify GORM tag
FieldGORMTag = func(columnName string, gormTag string) model.ModifyFieldOpt {
FieldGORMTag = func(columnName string, gormTag func(tag field.GormTag) field.GormTag) model.ModifyFieldOpt {
return func(m *model.Field) *model.Field {
if m.ColumnName == columnName {
m.GORMTag = gormTag
m.GORMTag = gormTag(m.GORMTag)
}
return m
}
}
// FieldNewTag add new tag
FieldNewTag = func(columnName string, newTag string) model.ModifyFieldOpt {
FieldNewTag = func(columnName string, newTag field.Tag) model.ModifyFieldOpt {
return func(m *model.Field) *model.Field {
if m.ColumnName == columnName {
m.NewTag += " " + newTag
for k, v := range newTag {
m.Tag.Set(k, v)
}
}
return m
}
@ -162,7 +164,7 @@ var (
if schemaName == nil {
schemaName = func(name string) string { return name }
}
m.NewTag = fmt.Sprintf(`%s %s:"%s"`, m.NewTag, tagName, schemaName(m.ColumnName))
m.Tag.Set(tagName, schemaName(m.ColumnName))
return m
}
}
@ -199,18 +201,13 @@ var (
if config == nil {
config = &field.RelateConfig{}
}
if config.JSONTag == "" {
config.JSONTag = ns.ColumnName("", fieldName)
}
return func(*model.Field) *model.Field {
return &model.Field{
Name: fieldName,
Type: config.RelateFieldPrefix(relationship) + table.StructInfo.Type,
JSONTag: config.JSONTag,
GORMTag: config.GORMTag,
NewTag: config.NewTag,
OverwriteTag: config.OverwriteTag,
Name: fieldName,
Type: config.RelateFieldPrefix(relationship) + table.StructInfo.Type,
Tag: config.GetTag(fieldName),
GORMTag: config.GORMTag,
Relation: field.NewRelationWithType(
relationship, fieldName, table.StructInfo.Package+"."+table.StructInfo.Type,
table.Relations()...),
@ -228,19 +225,13 @@ var (
if config == nil {
config = &field.RelateConfig{}
}
if config.JSONTag == "" {
config.JSONTag = ns.ColumnName("", fieldName)
}
return func(*model.Field) *model.Field {
return &model.Field{
Name: fieldName,
Type: config.RelateFieldPrefix(relationship) + fieldType,
JSONTag: config.JSONTag,
GORMTag: config.GORMTag,
NewTag: config.NewTag,
OverwriteTag: config.OverwriteTag,
Name: fieldName,
Type: config.RelateFieldPrefix(relationship) + fieldType,
GORMTag: config.GORMTag,
Tag: config.GetTag(fieldName),
Relation: field.NewRelationWithModel(relationship, fieldName, fieldType, relModel),
}
}

9
vendor/gorm.io/gen/generator.go generated vendored

@ -186,7 +186,6 @@ func (g *Generator) genModelConfig(tableName string, modelName string, modelOpts
FieldWithTypeTag: g.FieldWithTypeTag,
FieldJSONTagNS: g.fieldJSONTagNS,
FieldNewTagNS: g.fieldNewTagNS,
},
}
}
@ -428,8 +427,8 @@ func (g *Generator) generateSingleQueryFile(data *genInfo) (err error) {
return err
}
defer g.info(fmt.Sprintf("generate query file: %s/%s.gen.go", g.OutPath, data.FileName))
return g.output(fmt.Sprintf("%s/%s.gen.go", g.OutPath, data.FileName), buf.Bytes())
defer g.info(fmt.Sprintf("generate query file: %s%s%s.gen.go", g.OutPath, string(os.PathSeparator), data.FileName))
return g.output(fmt.Sprintf("%s%s%s.gen.go", g.OutPath, string(os.PathSeparator), data.FileName), buf.Bytes())
}
// generateQueryUnitTestFile generate unit test file for query
@ -460,8 +459,8 @@ func (g *Generator) generateQueryUnitTestFile(data *genInfo) (err error) {
}
}
defer g.info(fmt.Sprintf("generate unit test file: %s/%s.gen_test.go", g.OutPath, data.FileName))
return g.output(fmt.Sprintf("%s/%s.gen_test.go", g.OutPath, data.FileName), buf.Bytes())
defer g.info(fmt.Sprintf("generate unit test file: %s%s%s.gen_test.go", g.OutPath, string(os.PathSeparator), data.FileName))
return g.output(fmt.Sprintf("%s%s%s.gen_test.go", g.OutPath, string(os.PathSeparator), data.FileName), buf.Bytes())
}
// generateModelFile generate model structures and save to file

@ -3,6 +3,8 @@ package helper
import (
"errors"
"fmt"
"gorm.io/gen/field"
)
// Object an object interface
@ -34,7 +36,7 @@ type Field interface {
// JSONTag return json tag
JSONTag() string
// Tag return field tag
Tag() string
Tag() field.Tag
// Comment return comment
Comment() string

@ -84,16 +84,25 @@ func GetQueryStructMetaFromObject(obj helper.Object, conf *model.Config) (*Query
}
fields := make([]*model.Field, 0, 16)
for _, field := range obj.Fields() {
for _, fl := range obj.Fields() {
tag := fl.Tag()
if tag == nil {
tag = field.NewTag()
}
if gt := fl.GORMTag(); gt != "" {
tag.Set(field.TagKeyGorm, gt)
}
if jt := fl.GORMTag(); jt != "" {
tag.Set(field.TagKeyJson, jt)
}
fields = append(fields, &model.Field{
Name: field.Name(),
Type: field.Type(),
ColumnName: field.ColumnName(),
GORMTag: field.GORMTag(),
JSONTag: field.JSONTag(),
NewTag: field.Tag(),
ColumnComment: field.Comment(),
MultilineComment: strings.Contains(field.Comment(), "\n"),
Name: fl.Name(),
Type: fl.Type(),
ColumnName: fl.ColumnName(),
Tag: tag,
ColumnComment: fl.Comment(),
MultilineComment: strings.Contains(fl.Comment(), "\n"),
})
}

@ -19,15 +19,15 @@ import (
func getFields(db *gorm.DB, conf *model.Config, columns []*model.Column) (fields []*model.Field) {
for _, col := range columns {
col.SetDataTypeMap(conf.DataTypeMap)
col.WithNS(conf.FieldJSONTagNS, conf.FieldNewTagNS)
col.WithNS(conf.FieldJSONTagNS)
m := col.ToField(conf.FieldNullable, conf.FieldCoverable, conf.FieldSignable)
if filterField(m, conf.FilterOpts) == nil {
continue
}
if t, ok := col.ColumnType.ColumnType(); ok && !conf.FieldWithTypeTag { // remove type tag if FieldWithTypeTag == false
m.GORMTag = strings.ReplaceAll(m.GORMTag, ";type:"+t, "")
if _, ok := col.ColumnType.ColumnType(); ok && !conf.FieldWithTypeTag { // remove type tag if FieldWithTypeTag == false
m.GORMTag.Remove("type")
}
m = modifyField(m, conf.ModifyOpts)

@ -73,6 +73,10 @@ func (b *QueryStructMeta) parseStruct(st interface{}) error {
// getFieldRealType get basic type of field
func (b *QueryStructMeta) getFieldRealType(f reflect.Type) string {
serializerInterface := reflect.TypeOf((*schema.SerializerInterface)(nil)).Elem()
if f.Implements(serializerInterface) || reflect.New(f).Type().Implements(serializerInterface) {
return "serializer"
}
scanValuer := reflect.TypeOf((*field.ScanValuer)(nil)).Elem()
if f.Implements(scanValuer) || reflect.New(f).Type().Implements(scanValuer) {
return "field"
@ -190,7 +194,7 @@ func (b *QueryStructMeta) ReviseDIYMethod() error {
func (b *QueryStructMeta) addMethodFromAddMethodOpt(methods ...interface{}) *QueryStructMeta {
for _, method := range methods {
modelMethods, err := parser.GetModelMethod(method, 4)
modelMethods, err := parser.GetModelMethod(method, 5)
if err != nil {
panic("add diy method err:" + err.Error())
}

@ -318,6 +318,14 @@ func (s *Section) parseWhere() (res WhereClause, err error) {
}
res.Value = append(res.Value, forClause)
s.appendTmpl(forClause.Finish())
case model.WHERE:
var whereClause WhereClause
whereClause, err = s.parseWhere()
if err != nil {
return
}
res.Value = append(res.Value, whereClause)
s.appendTmpl(whereClause.Finish(res.VarName))
case model.TRIM:
var trimClause TrimClause
trimClause, err = s.parseTrim()
@ -377,6 +385,14 @@ func (s *Section) parseSet() (res SetClause, err error) {
}
res.Value = append(res.Value, forClause)
s.appendTmpl(forClause.Finish())
case model.WHERE:
var whereClause WhereClause
whereClause, err = s.parseWhere()
if err != nil {
return
}
res.Value = append(res.Value, whereClause)
s.appendTmpl(whereClause.Finish(res.VarName))
case model.TRIM:
var trimClause TrimClause
trimClause, err = s.parseTrim()
@ -435,6 +451,14 @@ func (s *Section) parseTrim() (res TrimClause, err error) {
}
res.Value = append(res.Value, forClause)
s.appendTmpl(forClause.Finish())
case model.WHERE:
var whereClause WhereClause
whereClause, err = s.parseWhere()
if err != nil {
return
}
res.Value = append(res.Value, whereClause)
s.appendTmpl(whereClause.Finish(res.VarName))
case model.END:
return
default:

@ -2,7 +2,6 @@ package model
import (
"bytes"
"fmt"
"strings"
"gorm.io/gen/field"
@ -170,31 +169,22 @@ type Field struct {
ColumnName string
ColumnComment string
MultilineComment bool
JSONTag string
GORMTag string
NewTag string
OverwriteTag string
Tag field.Tag
GORMTag field.GormTag
CustomGenType string
Relation *field.Relation
}
// Tags ...
func (m *Field) Tags() string {
if m.OverwriteTag != "" {
return strings.TrimSpace(m.OverwriteTag)
if _, ok := m.Tag[field.TagKeyGorm]; ok {
return m.Tag.Build()
}
var tags strings.Builder
if gormTag := strings.TrimSpace(m.GORMTag); gormTag != "" {
tags.WriteString(fmt.Sprintf(`gorm:"%s" `, gormTag))
if gormTag := strings.TrimSpace(m.GORMTag.Build()); gormTag != "" {
m.Tag.Set(field.TagKeyGorm, gormTag)
}
if jsonTag := strings.TrimSpace(m.JSONTag); jsonTag != "" {
tags.WriteString(fmt.Sprintf(`json:"%s" `, jsonTag))
}
if newTag := strings.TrimSpace(m.NewTag); newTag != "" {
tags.WriteString(newTag)
}
return strings.TrimSpace(tags.String())
return m.Tag.Build()
}
// IsRelation ...
@ -222,6 +212,8 @@ func (m *Field) GenType() string {
return "Time"
case "json.RawMessage", "[]byte":
return "Bytes"
case "serializer":
return "Serializer"
default:
return "Field"
}

@ -33,7 +33,7 @@ type NameStrategy struct {
// FieldConfig field configuration
type FieldConfig struct {
DataTypeMap map[string]func(detailType string) (dataType string)
DataTypeMap map[string]func(columnType gorm.ColumnType) (dataType string)
FieldNullable bool // generate pointer when field is nullable
FieldCoverable bool // generate pointer when field has default value
@ -42,7 +42,6 @@ type FieldConfig struct {
FieldWithTypeTag bool // generate with gorm column type tag
FieldJSONTagNS func(columnName string) string
FieldNewTagNS func(columnName string) string
ModifyOpts []FieldOption
FilterOpts []FieldOption

@ -1,11 +1,11 @@
package model
import (
"bytes"
"fmt"
"reflect"
"strings"
"gorm.io/gen/field"
"gorm.io/gorm"
)
@ -15,20 +15,19 @@ type Column struct {
TableName string `gorm:"column:TABLE_NAME"`
Indexes []*Index `gorm:"-"`
UseScanType bool `gorm:"-"`
dataTypeMap map[string]func(detailType string) (dataType string) `gorm:"-"`
dataTypeMap map[string]func(columnType gorm.ColumnType) (dataType string) `gorm:"-"`
jsonTagNS func(columnName string) string `gorm:"-"`
newTagNS func(columnName string) string `gorm:"-"`
}
// SetDataTypeMap set data type map
func (c *Column) SetDataTypeMap(m map[string]func(detailType string) (dataType string)) {
func (c *Column) SetDataTypeMap(m map[string]func(columnType gorm.ColumnType) (dataType string)) {
c.dataTypeMap = m
}
// GetDataType get data type
func (c *Column) GetDataType() (fieldtype string) {
if mapping, ok := c.dataTypeMap[c.DatabaseTypeName()]; ok {
return mapping(c.columnType())
return mapping(c.ColumnType)
}
if c.UseScanType && c.ScanType() != nil {
return c.ScanType().String()
@ -37,14 +36,11 @@ func (c *Column) GetDataType() (fieldtype string) {
}
// WithNS with name strategy
func (c *Column) WithNS(jsonTagNS, newTagNS func(columnName string) string) {
c.jsonTagNS, c.newTagNS = jsonTagNS, newTagNS
func (c *Column) WithNS(jsonTagNS func(columnName string) string) {
c.jsonTagNS = jsonTagNS
if c.jsonTagNS == nil {
c.jsonTagNS = func(n string) string { return n }
}
if c.newTagNS == nil {
c.newTagNS = func(string) string { return "" }
}
}
// ToField convert to field
@ -75,8 +71,7 @@ func (c *Column) ToField(nullable, coverable, signable bool) *Field {
ColumnName: c.Name(),
MultilineComment: c.multilineComment(),
GORMTag: c.buildGormTag(),
JSONTag: c.jsonTagNS(c.Name()),
NewTag: c.newTagNS(c.Name()),
Tag: map[string]string{field.TagKeyJson: c.jsonTagNS(c.Name())},
ColumnComment: comment,
}
}
@ -86,19 +81,20 @@ func (c *Column) multilineComment() bool {
return ok && strings.Contains(cm, "\n")
}
func (c *Column) buildGormTag() string {
var buf bytes.Buffer
buf.WriteString(fmt.Sprintf("column:%s;type:%s", c.Name(), c.columnType()))
func (c *Column) buildGormTag() field.GormTag {
tag := field.NewGormTag()
tag.Set(field.TagKeyGormColumn, c.Name())
tag.Set(field.TagKeyGormType, c.columnType())
isPriKey, ok := c.PrimaryKey()
isValidPriKey := ok && isPriKey
if isValidPriKey {
buf.WriteString(";primaryKey")
tag.Set(field.TagKeyGormPrimaryKey, "")
if at, ok := c.AutoIncrement(); ok {
buf.WriteString(fmt.Sprintf(";autoIncrement:%t", at))
tag.Set(field.TagKeyGormAutoIncrement, fmt.Sprintf("%t", at))
}
} else if n, ok := c.Nullable(); ok && !n {
buf.WriteString(";not null")
tag.Set(field.TagKeyGormNotNull, "")
}
for _, idx := range c.Indexes {
@ -109,16 +105,22 @@ func (c *Column) buildGormTag() string {
continue
}
if uniq, _ := idx.Unique(); uniq {
buf.WriteString(fmt.Sprintf(";uniqueIndex:%s,priority:%d", idx.Name(), idx.Priority))
tag.Set(field.TagKeyGormUniqueIndex, fmt.Sprintf("%s,priority:%d", idx.Name(), idx.Priority))
} else {
buf.WriteString(fmt.Sprintf(";index:%s,priority:%d", idx.Name(), idx.Priority))
tag.Set(field.TagKeyGormIndex, fmt.Sprintf("%s,priority:%d", idx.Name(), idx.Priority))
}
}
if dtValue := c.defaultTagValue(); !isValidPriKey && c.needDefaultTag(dtValue) { // cannot set default tag for primary key
buf.WriteString(fmt.Sprintf(`;default:%s`, dtValue))
if dtValue := c.defaultTagValue(); c.needDefaultTag(dtValue) { // cannot set default tag for primary key
tag.Set(field.TagKeyGormDefault, dtValue)
}
if comment, ok := c.Comment(); ok && comment != "" {
if c.multilineComment() {
comment = strings.ReplaceAll(comment, "\n", "\\n")
}
tag.Set(field.TagKeyGormComment, comment)
}
return buf.String()
return tag
}
// needDefaultTag check if default tag needed

@ -293,6 +293,9 @@ func (p *Param) astGetEltType(expr ast.Expr) {
p.astGetEltType(v.X)
case *ast.InterfaceType:
p.Type = "interface{}"
case *ast.ArrayType:
p.astGetEltType(v.Elt)
p.Type = "[]" + p.Type
default:
log.Fatalf("unknow param type: %+v", v)
}

@ -19,13 +19,7 @@ import (
// {{.ModelStructName}} {{.StructComment}}
type {{.ModelStructName}} struct {
{{range .Fields}}
{{if .MultilineComment -}}
/*
{{.ColumnComment}}
*/
{{end -}}
{{.Name}} {{.Type}} ` + "`{{.Tags}}` " +
"{{if not .MultilineComment}}{{if .ColumnComment}}// {{.ColumnComment}}{{end}}{{end}}" +
`{{end}}
}

@ -84,10 +84,14 @@ func (q *Query) Transaction(fc func(tx *Query) error, opts ...*sql.TxOptions) er
}
func (q *Query) Begin(opts ...*sql.TxOptions) *QueryTx {
return &QueryTx{q.clone(q.db.Begin(opts...))}
tx := q.db.Begin(opts...)
return &QueryTx{Query: q.clone(tx), Error: tx.Error}
}
type QueryTx struct{ *Query }
type QueryTx struct {
*Query
Error error
}
func (q *QueryTx) Commit() error {
return q.db.Commit().Error

@ -237,6 +237,11 @@ func (a {{$.QueryStructName}}{{$relationship}}{{$relation.Name}}) WithContext(ct
return &a
}
func (a {{$.QueryStructName}}{{$relationship}}{{$relation.Name}}) Session(session *gorm.Session) *{{$.QueryStructName}}{{$relationship}}{{$relation.Name}} {
a.db = a.db.Session(session)
return &a
}
func (a {{$.QueryStructName}}{{$relationship}}{{$relation.Name}}) Model(m *{{$.StructInfo.Package}}.{{$.StructInfo.Type}}) *{{$.QueryStructName}}{{$relationship}}{{$relation.Name}}Tx {
return &{{$.QueryStructName}}{{$relationship}}{{$relation.Name}}Tx{a.db.Model(m).Association(a.Name())}
}

11
vendor/modules.txt vendored

@ -8,7 +8,7 @@ github.com/aliyun/aliyun-oss-go-sdk/oss
## explicit; go 1.16
github.com/allegro/bigcache/v3
github.com/allegro/bigcache/v3/queue
# github.com/baidubce/bce-sdk-go v0.9.148
# github.com/baidubce/bce-sdk-go v0.9.149
## explicit; go 1.11
github.com/baidubce/bce-sdk-go/auth
github.com/baidubce/bce-sdk-go/bce
@ -75,8 +75,8 @@ github.com/gin-gonic/gin/binding
github.com/gin-gonic/gin/internal/bytesconv
github.com/gin-gonic/gin/internal/json
github.com/gin-gonic/gin/render
# github.com/go-co-op/gocron v1.22.2
## explicit; go 1.19
# github.com/go-co-op/gocron v1.23.0
## explicit; go 1.20
github.com/go-co-op/gocron
# github.com/go-logr/logr v1.2.4
## explicit; go 1.16
@ -267,7 +267,7 @@ github.com/pelletier/go-toml/v2/unstable
# github.com/pkg/errors v0.9.1
## explicit
github.com/pkg/errors
# github.com/qiniu/go-sdk/v7 v7.14.0
# github.com/qiniu/go-sdk/v7 v7.15.0
## explicit; go 1.14
github.com/qiniu/go-sdk/v7
github.com/qiniu/go-sdk/v7/auth
@ -492,7 +492,6 @@ golang.org/x/net/idna
# golang.org/x/sync v0.1.0
## explicit
golang.org/x/sync/errgroup
golang.org/x/sync/semaphore
golang.org/x/sync/singleflight
# golang.org/x/sys v0.7.0
## explicit; go 1.17
@ -590,7 +589,7 @@ gorm.io/driver/mysql
# gorm.io/driver/postgres v1.5.0
## explicit; go 1.14
gorm.io/driver/postgres
# gorm.io/gen v0.3.21
# gorm.io/gen v0.3.22
## explicit; go 1.18
gorm.io/gen
gorm.io/gen/field

Loading…
Cancel
Save