From ddcc84f1200c9835d0abed3cb393d2e1208866d0 Mon Sep 17 00:00:00 2001
From: 李光春
Date: Tue, 19 Jul 2022 13:19:22 +0800
Subject: [PATCH] - update dorm

---
 go.mod | 83 +-
 go.sum | 133 +-
 utils/dorm/yiigo.go | 21 +
 utils/dorm/yiigo_mongodb.go | 18 +
 utils/dorm/yiigo_mysql.go | 27 +
 utils/dorm/yiigo_redis.go | 26 +
 vendor/entgo.io/ent/dialect/dialect.go | 208 +
 vendor/entgo.io/ent/dialect/sql/builder.go | 3645 +++++++++++++++++
 vendor/entgo.io/ent/dialect/sql/driver.go | 184 +
 vendor/entgo.io/ent/dialect/sql/scan.go | 272 ++
 .../github.com/PuerkitoBio/purell/.gitignore | 5 -
 .../github.com/PuerkitoBio/purell/.travis.yml | 12 -
 vendor/github.com/PuerkitoBio/purell/LICENSE | 12 -
 .../github.com/PuerkitoBio/purell/README.md | 188 -
 .../github.com/PuerkitoBio/purell/purell.go | 379 --
 .../github.com/PuerkitoBio/urlesc/.travis.yml | 15 -
 .../github.com/PuerkitoBio/urlesc/README.md | 16 -
 .../github.com/PuerkitoBio/urlesc/urlesc.go | 180 -
 .../aws/aws-sdk-go/aws/endpoints/defaults.go | 220 +-
 .../github.com/aws/aws-sdk-go/aws/version.go | 2 +-
 .../baidubce/bce-sdk-go/bce/config.go | 2 +-
 vendor/github.com/daodao97/fly/README.md | 16 +-
 vendor/github.com/daodao97/fly/model.go | 26 +-
 vendor/github.com/daodao97/fly/model_has.go | 3 +
 vendor/github.com/daodao97/fly/model_with.go | 18 +-
 vendor/github.com/daodao97/fly/validator.go | 167 +-
 .../github.com/denisenkom/go-mssqldb/error.go | 4 +
 .../github.com/denisenkom/go-mssqldb/token.go | 2 +-
 vendor/github.com/edsrzf/mmap-go/.gitignore | 3 +
 vendor/github.com/edsrzf/mmap-go/README.md | 18 +-
 .../github.com/edsrzf/mmap-go/mmap_plan9.go | 27 +
 .../github.com/edsrzf/mmap-go/mmap_windows.go | 23 +-
 .../emicklei/go-restful/.travis.yml | 6 -
 .../github.com/emicklei/go-restful/Makefile | 7 -
 .../emicklei/go-restful/{ => v3}/.gitignore | 1 +
 .../emicklei/go-restful/v3/.goconvey | 1 +
 .../emicklei/go-restful/v3/.travis.yml | 13 +
 .../emicklei/go-restful/{ => v3}/CHANGES.md | 101 +-
 .../emicklei/go-restful/{ => v3}/LICENSE | 0
 .../emicklei/go-restful/v3/Makefile | 8 +
 .../emicklei/go-restful/{ => v3}/README.md | 42 +-
 .../emicklei/go-restful/v3/SECURITY.md | 13 +
 .../emicklei/go-restful/{ => v3}/Srcfile | 0
 .../emicklei/go-restful/{ => v3}/compress.go | 6 +-
 .../go-restful/{ => v3}/compressor_cache.go | 0
 .../go-restful/{ => v3}/compressor_pools.go | 0
 .../go-restful/{ => v3}/compressors.go | 0
 .../emicklei/go-restful/{ => v3}/constants.go | 0
 .../emicklei/go-restful/{ => v3}/container.go | 137 +-
 .../go-restful/{ => v3}/cors_filter.go | 67 +-
 .../emicklei/go-restful/{ => v3}/curly.go | 13 +-
 .../go-restful/{ => v3}/curly_route.go | 0
 .../emicklei/go-restful/v3/custom_verb.go | 29 +
 .../emicklei/go-restful/{ => v3}/doc.go | 6 +-
 .../go-restful/{ => v3}/entity_accessors.go | 0
 .../emicklei/go-restful/v3/extensions.go | 21 +
 .../emicklei/go-restful/{ => v3}/filter.go | 8 +-
 .../emicklei/go-restful/{ => v3}/json.go | 0
 .../emicklei/go-restful/{ => v3}/jsoniter.go | 0
 .../emicklei/go-restful/{ => v3}/jsr311.go | 33 +-
 .../emicklei/go-restful/{ => v3}/log/log.go | 0
 .../emicklei/go-restful/{ => v3}/logger.go | 2 +-
 .../emicklei/go-restful/{ => v3}/mime.go | 0
 .../go-restful/{ => v3}/options_filter.go | 0
 .../emicklei/go-restful/{ => v3}/parameter.go | 101 +-
 .../go-restful/{ => v3}/path_expression.go | 0
 .../go-restful/{ => v3}/path_processor.go | 15 +-
 .../emicklei/go-restful/{ => v3}/request.go | 24 +-
 .../emicklei/go-restful/{ => v3}/response.go | 11 +-
 .../emicklei/go-restful/{ => v3}/route.go | 40 +-
 .../go-restful/{ => v3}/route_builder.go | 108 +-
 .../emicklei/go-restful/v3/route_reader.go | 66 +
 .../emicklei/go-restful/{ => v3}/router.go | 0
 .../go-restful/{ => v3}/service_error.go | 11 +-
 .../go-restful/{ => v3}/web_service.go | 37 +-
 .../{ => v3}/web_service_container.go | 0
 .../fsnotify/fsnotify/.editorconfig | 12 +
 .../fsnotify/fsnotify/.gitattributes | 1 +
 .../github.com/fsnotify/fsnotify/.gitignore | 6 +
 vendor/github.com/fsnotify/fsnotify/.mailmap | 2 +
 vendor/github.com/fsnotify/fsnotify/AUTHORS | 62 +
 .../github.com/fsnotify/fsnotify/CHANGELOG.md | 357 ++
 .../fsnotify/fsnotify/CONTRIBUTING.md | 60 +
 .../urlesc => fsnotify/fsnotify}/LICENSE | 1 +
 vendor/github.com/fsnotify/fsnotify/README.md | 120 +
 vendor/github.com/fsnotify/fsnotify/fen.go | 38 +
 .../github.com/fsnotify/fsnotify/fsnotify.go | 69 +
 .../fsnotify/fsnotify/fsnotify_unsupported.go | 36 +
 .../github.com/fsnotify/fsnotify/inotify.go | 351 ++
 .../fsnotify/fsnotify/inotify_poller.go | 187 +
 vendor/github.com/fsnotify/fsnotify/kqueue.go | 535 +++
 .../fsnotify/fsnotify/open_mode_bsd.go | 12 +
 .../fsnotify/fsnotify/open_mode_darwin.go | 13 +
 .../github.com/fsnotify/fsnotify/windows.go | 586 +++
 .../jsonreference/internal/normalize_url.go | 63 +
 .../go-openapi/jsonreference/reference.go | 6 +-
 .../github.com/go-openapi/swag/.gitattributes | 2 +
 .../github.com/go-openapi/swag/.golangci.yml | 11 +
 vendor/github.com/go-openapi/swag/.travis.yml | 37 -
 vendor/github.com/go-openapi/swag/file.go | 33 +
 .../github.com/go-openapi/swag/post_go18.go | 1 +
 .../github.com/go-openapi/swag/post_go19.go | 1 +
 vendor/github.com/go-openapi/swag/pre_go18.go | 1 +
 vendor/github.com/go-openapi/swag/pre_go19.go | 1 +
 vendor/github.com/go-stack/stack/README.md | 38 -
 vendor/github.com/go-stack/stack/stack.go | 400 --
 vendor/github.com/goccy/go-json/CHANGELOG.md | 15 +
 .../goccy/go-json/internal/decoder/compile.go | 11 +-
 .../goccy/go-json/internal/decoder/string.go | 24 +-
 .../go-json/internal/encoder/compiler.go | 7 +-
 .../goccy/go-json/internal/encoder/vm/vm.go | 10 +-
 .../go-json/internal/encoder/vm_color/vm.go | 10 +-
 .../internal/encoder/vm_color_indent/vm.go | 10 +-
 .../go-json/internal/encoder/vm_indent/vm.go | 10 +-
 .../github.com/golang-sql/sqlexp/messages.go | 3 +-
 .../google/gnostic/jsonschema/display.go | 17 +-
 .../google/gnostic/jsonschema/models.go | 8 +-
 .../google/gnostic/jsonschema/reader.go | 1 -
 .../google/gnostic/jsonschema/writer.go | 30 +-
 .../google/gnostic/openapiv2/OpenAPIv2.go | 7 +-
 .../google/gnostic/openapiv3/OpenAPIv3.go | 7 +-
 .../google/gnostic/openapiv3/OpenAPIv3.pb.go | 13 +-
 .../google/gnostic/openapiv3/OpenAPIv3.proto | 2 +-
 .../google/gnostic/openapiv3/README.md | 4 +
 .../gnostic/openapiv3/annotations.pb.go | 183 +
 .../gnostic/openapiv3/annotations.proto | 60 +
 vendor/github.com/google/gofuzz/.travis.yml | 11 +-
 .../github.com/google/gofuzz/CONTRIBUTING.md | 2 +-
 vendor/github.com/google/gofuzz/README.md | 18 +
 .../google/gofuzz/bytesource/bytesource.go | 81 +
 vendor/github.com/google/gofuzz/fuzz.go | 137 +-
 .../github.com/gorilla/websocket/.gitignore | 25 +
 vendor/github.com/gorilla/websocket/AUTHORS | 9 +
 vendor/github.com/gorilla/websocket/LICENSE | 22 +
 vendor/github.com/gorilla/websocket/README.md | 39 +
 vendor/github.com/gorilla/websocket/client.go | 422 ++
 .../gorilla/websocket/compression.go | 148 +
 vendor/github.com/gorilla/websocket/conn.go | 1230 ++++++
 vendor/github.com/gorilla/websocket/doc.go | 227 +
 vendor/github.com/gorilla/websocket/join.go | 42 +
 vendor/github.com/gorilla/websocket/json.go | 60 +
 vendor/github.com/gorilla/websocket/mask.go | 55 +
 .../github.com/gorilla/websocket/mask_safe.go | 16 +
 .../github.com/gorilla/websocket/prepared.go | 102 +
 vendor/github.com/gorilla/websocket/proxy.go | 77 +
 vendor/github.com/gorilla/websocket/server.go | 365 ++
 .../gorilla/websocket/tls_handshake.go | 21 +
 .../gorilla/websocket/tls_handshake_116.go | 21 +
 vendor/github.com/gorilla/websocket/util.go | 283 ++
 .../gorilla/websocket/x_net_proxy.go | 473 +++
 .../hashicorp/go-version/CHANGELOG.md | 45 +
 .../github.com/hashicorp/go-version/LICENSE | 354 ++
 .../github.com/hashicorp/go-version/README.md | 66 +
 .../hashicorp/go-version/constraint.go | 296 ++
 .../hashicorp/go-version/version.go | 407 ++
 .../go-version/version_collection.go | 17 +
 .../github.com/imdario/mergo/.deepsource.toml | 12 +
 vendor/github.com/imdario/mergo/.travis.yml | 5 +
 vendor/github.com/imdario/mergo/README.md | 79 +-
 vendor/github.com/imdario/mergo/doc.go | 141 +-
 vendor/github.com/imdario/mergo/map.go | 4 +
 vendor/github.com/imdario/mergo/merge.go | 179 +-
 vendor/github.com/imdario/mergo/mergo.go | 25 +-
 vendor/github.com/jmoiron/sqlx/.gitignore | 25 +
 vendor/github.com/jmoiron/sqlx/.travis.yml | 26 +
 vendor/github.com/jmoiron/sqlx/LICENSE | 23 +
 vendor/github.com/jmoiron/sqlx/README.md | 213 +
 vendor/github.com/jmoiron/sqlx/bind.go | 265 ++
 vendor/github.com/jmoiron/sqlx/doc.go | 12 +
 vendor/github.com/jmoiron/sqlx/named.go | 458 +++
 .../github.com/jmoiron/sqlx/named_context.go | 132 +
 .../jmoiron/sqlx/reflectx/README.md | 17 +
 .../jmoiron/sqlx/reflectx/reflect.go | 444 ++
 vendor/github.com/jmoiron/sqlx/sqlx.go | 1051 +++++
 .../github.com/jmoiron/sqlx/sqlx_context.go | 414 ++
 vendor/github.com/joho/godotenv/.gitignore | 1 +
 vendor/github.com/joho/godotenv/LICENCE | 23 +
 vendor/github.com/joho/godotenv/README.md | 188 +
 vendor/github.com/joho/godotenv/godotenv.go | 363 ++
 vendor/github.com/joho/godotenv/renovate.json | 5 +
 vendor/github.com/lib/pq/conn.go | 8 +-
 vendor/github.com/lib/pq/connector.go | 5 +
 vendor/github.com/lib/pq/copy.go | 38 +
 vendor/github.com/lib/pq/encode.go | 8 +-
 vendor/github.com/lib/pq/error.go | 5 +
 vendor/github.com/lib/pq/ssl_permissions.go | 80 +-
 .../mailru/easyjson/jlexer/lexer.go | 14 +
 .../github.com/mattn/go-colorable/.travis.yml | 15 -
 .../github.com/mattn/go-colorable/README.md | 2 +-
 .../mattn/go-colorable/colorable_appengine.go | 1 +
 .../mattn/go-colorable/colorable_others.go | 4 +-
 .../mattn/go-colorable/colorable_windows.go | 14 +-
 .../mattn/go-colorable/noncolorable.go | 15 +-
 .../github.com/montanaflynn/stats/.gitignore | 5 +
 .../github.com/montanaflynn/stats/.travis.yml | 29 +
 .../montanaflynn/stats/CHANGELOG.md | 598 +++
 .../montanaflynn/stats/DOCUMENTATION.md | 1237 ++++++
 vendor/github.com/montanaflynn/stats/LICENSE | 21 +
 vendor/github.com/montanaflynn/stats/Makefile | 34 +
 .../github.com/montanaflynn/stats/README.md | 228 ++
 .../montanaflynn/stats/correlation.go | 60 +
 .../montanaflynn/stats/cumulative_sum.go | 21 +
 vendor/github.com/montanaflynn/stats/data.go | 169 +
 .../montanaflynn/stats/deviation.go | 57 +
 .../montanaflynn/stats/distances.go | 88 +
 vendor/github.com/montanaflynn/stats/doc.go | 23 +
 .../github.com/montanaflynn/stats/entropy.go | 31 +
 .../github.com/montanaflynn/stats/errors.go | 35 +
 .../github.com/montanaflynn/stats/legacy.go | 49 +
 vendor/github.com/montanaflynn/stats/load.go | 199 +
 vendor/github.com/montanaflynn/stats/max.go | 26 +
 vendor/github.com/montanaflynn/stats/mean.go | 60 +
 .../github.com/montanaflynn/stats/median.go | 25 +
 vendor/github.com/montanaflynn/stats/min.go | 26 +
 vendor/github.com/montanaflynn/stats/mode.go | 47 +
 vendor/github.com/montanaflynn/stats/norm.go | 254 ++
 .../github.com/montanaflynn/stats/outlier.go | 44 +
 .../montanaflynn/stats/percentile.go | 86 +
 .../github.com/montanaflynn/stats/quartile.go | 74 +
 .../github.com/montanaflynn/stats/ranksum.go | 183 +
 .../montanaflynn/stats/regression.go | 113 +
 vendor/github.com/montanaflynn/stats/round.go | 38 +
 .../github.com/montanaflynn/stats/sample.go | 76 +
 .../github.com/montanaflynn/stats/sigmoid.go | 18 +
 .../github.com/montanaflynn/stats/softmax.go | 25 +
 vendor/github.com/montanaflynn/stats/sum.go | 18 +
 vendor/github.com/montanaflynn/stats/util.go | 43 +
 .../github.com/montanaflynn/stats/variance.go | 105 +
 vendor/github.com/nsqio/go-nsq/AUTHORS | 15 +
 vendor/github.com/nsqio/go-nsq/ChangeLog.md | 284 ++
 vendor/github.com/nsqio/go-nsq/LICENSE | 17 +
 vendor/github.com/nsqio/go-nsq/README.md | 19 +
 vendor/github.com/nsqio/go-nsq/UPGRADING.md | 180 +
 vendor/github.com/nsqio/go-nsq/api_request.go | 78 +
 vendor/github.com/nsqio/go-nsq/command.go | 221 +
 vendor/github.com/nsqio/go-nsq/config.go | 674 +++
 vendor/github.com/nsqio/go-nsq/config_flag.go | 31 +
 vendor/github.com/nsqio/go-nsq/conn.go | 765 ++++
 vendor/github.com/nsqio/go-nsq/consumer.go | 1230 ++++++
 vendor/github.com/nsqio/go-nsq/delegates.go | 139 +
 vendor/github.com/nsqio/go-nsq/doc.go | 88 +
 vendor/github.com/nsqio/go-nsq/errors.go | 44 +
 vendor/github.com/nsqio/go-nsq/message.go | 164 +
 vendor/github.com/nsqio/go-nsq/producer.go | 427 ++
 vendor/github.com/nsqio/go-nsq/protocol.go | 100 +
 vendor/github.com/nsqio/go-nsq/states.go | 8 +
 vendor/github.com/nsqio/go-nsq/version.go | 4 +
 .../shenghui0779/vitess_pool/.gitignore | 15 +
 .../shenghui0779/vitess_pool/LICENSE | 21 +
 .../shenghui0779/vitess_pool/README.md | 4 +
 .../shenghui0779/vitess_pool/atomic.go | 66 +
 .../shenghui0779/vitess_pool/doc.go | 3 +
 .../shenghui0779/vitess_pool/pool.go | 400 ++
 .../shenghui0779/vitess_pool/semaphore.go | 64 +
 .../shenghui0779/vitess_pool/timer.go | 160 +
 .../github.com/shenghui0779/yiigo/.gitignore | 22 +
 vendor/github.com/shenghui0779/yiigo/LICENSE | 201 +
 .../github.com/shenghui0779/yiigo/README.md | 521 +++
 .../github.com/shenghui0779/yiigo/crypto.go | 701 ++++
 vendor/github.com/shenghui0779/yiigo/db.go | 225 +
 vendor/github.com/shenghui0779/yiigo/env.go | 182 +
 vendor/github.com/shenghui0779/yiigo/form.go | 400 +
 vendor/github.com/shenghui0779/yiigo/grpc.go | 167 +
 vendor/github.com/shenghui0779/yiigo/hash.go | 108 +
 .../github.com/shenghui0779/yiigo/helper.go | 260 ++
 vendor/github.com/shenghui0779/yiigo/http.go | 267 ++
 vendor/github.com/shenghui0779/yiigo/init.go | 103 +
 .../github.com/shenghui0779/yiigo/location.go | 309 ++
 .../github.com/shenghui0779/yiigo/logger.go | 146 +
 vendor/github.com/shenghui0779/yiigo/mongo.go | 68 +
 vendor/github.com/shenghui0779/yiigo/mutex.go | 134 +
 vendor/github.com/shenghui0779/yiigo/nsq.go | 155 +
 vendor/github.com/shenghui0779/yiigo/redis.go | 317 ++
 .../shenghui0779/yiigo/sql_builder.go | 1016 +++++
 vendor/github.com/shenghui0779/yiigo/ssh.go | 82 +
 .../shenghui0779/yiigo/timingwheel.go | 275 ++
 .../shenghui0779/yiigo/validator.go | 167 +
 .../shenghui0779/yiigo/websocket.go | 241 ++
 vendor/github.com/tidwall/pretty/LICENSE | 20 +
 vendor/github.com/tidwall/pretty/README.md | 122 +
 vendor/github.com/tidwall/pretty/pretty.go | 674 +++
 .../mongo-driver/bson/bsoncodec/bsoncodec.go | 22 +
 .../bson/bsoncodec/default_value_decoders.go | 16 +-
 .../mongo-driver/bson/bsoncodec/doc.go | 6 +
 .../bson/bsoncodec/empty_interface_codec.go | 17 +-
 .../mongo-driver/bson/bsoncodec/map_codec.go | 21 +
 .../mongo-driver/bson/bsonrw/value_reader.go | 9 +-
 .../mongo-driver/bson/decoder.go | 23 +
 .../mongo-driver/internal/csfle_util.go | 39 +
 .../mongo-driver/internal/csot_util.go | 34 +
 .../mongo-driver/internal/error.go | 4 +
 .../internal/randutil/rand/bits.go | 38 +
 .../internal/randutil/rand/exp.go | 223 +
 .../internal/randutil/rand/normal.go | 158 +
 .../internal/randutil/rand/rand.go | 374 ++
 .../internal/randutil/rand/rng.go | 93 +
 .../internal/randutil/randutil.go | 64 +-
 .../{x/mongo/driver => internal}/uuid/uuid.go | 35 +-
 .../mongo-driver/mongo/batch_cursor.go | 6 +
 .../mongo-driver/mongo/bulk_write.go | 29 +-
 .../mongo-driver/mongo/bulk_write_models.go | 6 +-
 .../mongo-driver/mongo/change_stream.go | 39 +-
 .../mongo/change_stream_deployment.go | 4 +
 .../mongo-driver/mongo/client.go | 176 +-
 .../mongo-driver/mongo/client_encryption.go | 199 +-
 .../mongo-driver/mongo/collection.go | 222 +-
 .../mongo-driver/mongo/cursor.go | 2 +-
 .../mongo-driver/mongo/database.go | 210 +-
 .../go.mongodb.org/mongo-driver/mongo/doc.go | 27 +-
 .../mongo-driver/mongo/errors.go | 3 +
 .../mongo-driver/mongo/index_view.go | 19 +-
 .../mongo-driver/mongo/mongo.go | 11 -
 .../mongo-driver/mongo/mongocryptd.go | 26 +-
 .../mongo/options/aggregateoptions.go | 12 +-
 .../mongo/options/autoencryptionoptions.go | 54 +-
 .../mongo/options/bulkwriteoptions.go | 15 +-
 .../mongo/options/changestreamoptions.go | 45 +-
 .../mongo/options/clientoptions.go | 90 +-
 .../mongo/options/collectionoptions.go | 16 +-
 .../mongo/options/countoptions.go | 24 +
 .../mongo/options/createcollectionoptions.go | 59 +-
 .../mongo/options/datakeyoptions.go | 13 +
 .../mongo-driver/mongo/options/dboptions.go | 16 +-
 .../mongo/options/deleteoptions.go | 13 +
 .../mongo/options/distinctoptions.go | 21 +
 .../mongo/options/encryptoptions.go | 49 +-
 .../mongo/options/estimatedcountoptions.go | 22 +-
 .../mongo-driver/mongo/options/findoptions.go | 155 +-
 .../mongo/options/gridfsoptions.go | 8 +
 .../mongo/options/indexoptions.go | 28 +-
 .../mongo/options/insertoptions.go | 30 +-
 .../mongo/options/listdatabasesoptions.go | 2 +-
 .../mongo/options/mongooptions.go | 14 +-
 .../mongo/options/replaceoptions.go | 15 +-
 .../mongo/options/rewrapdatakeyoptions.go | 52 +
 .../mongo/options/sessionoptions.go | 10 +-
 .../mongo/options/transactionoptions.go | 8 +
 .../mongo/options/updateoptions.go | 15 +-
 .../mongo-driver/mongo/readpref/options.go | 2 +-
 .../mongo-driver/mongo/results.go | 17 +
 .../mongo-driver/mongo/session.go | 14 +-
 .../mongo/writeconcern/writeconcern.go | 14 +-
 .../mongo-driver/version/version.go | 2 +-
 .../mongo-driver/x/bsonx/bsoncore/bsoncore.go | 16 +-
 .../mongo-driver/x/bsonx/bsoncore/document.go | 30 +-
 .../x/bsonx/bsoncore/document_sequence.go | 6 +
 .../mongo-driver/x/bsonx/registry.go | 6 +
 .../driver/auth/internal/gssapi/gss_wrapper.c | 6 +
 .../driver/auth/internal/gssapi/gss_wrapper.h | 6 +
 .../auth/internal/gssapi/sspi_wrapper.c | 6 +
 .../auth/internal/gssapi/sspi_wrapper.h | 6 +
 .../x/mongo/driver/batch_cursor.go | 22 +-
 .../mongo-driver/x/mongo/driver/batches.go | 6 +
 .../x/mongo/driver/compression.go | 30 +-
 .../x/mongo/driver/connstring/connstring.go | 87 +-
 .../mongo-driver/x/mongo/driver/crypt.go | 75 +-
 .../mongo-driver/x/mongo/driver/driver.go | 24 +-
 .../mongo-driver/x/mongo/driver/errors.go | 9 +
 .../mongo-driver/x/mongo/driver/legacy.go | 6 +
 .../x/mongo/driver/mongocrypt/mongocrypt.go | 163 +-
 .../mongocrypt/mongocrypt_not_enabled.go | 17 +
 .../options/mongocrypt_context_options.go | 80 +-
 .../mongocrypt/options/mongocrypt_options.go | 33 +-
 .../mongo-driver/x/mongo/driver/operation.go | 91 +-
 .../x/mongo/driver/operation/aggregate.go | 19 +-
 .../x/mongo/driver/operation/command.go | 15 +-
 .../x/mongo/driver/operation/count.go | 68 +-
 .../x/mongo/driver/operation/create.go | 89 +-
 .../x/mongo/driver/operation/createIndexes.go | 16 +-
 .../x/mongo/driver/operation/delete.go | 28 +
 .../x/mongo/driver/operation/distinct.go | 30 +-
 .../x/mongo/driver/operation/drop_indexes.go | 16 +-
 .../x/mongo/driver/operation/find.go | 18 +-
 .../mongo/driver/operation/find_and_modify.go | 31 +-
 .../x/mongo/driver/operation/insert.go | 28 +
 .../x/mongo/driver/operation/listDatabases.go | 15 +-
 .../driver/operation/list_collections.go | 15 +-
 .../x/mongo/driver/operation/list_indexes.go | 17 +-
 .../x/mongo/driver/operation/update.go | 28 +
 .../x/mongo/driver/session/client_session.go | 2 +-
 .../x/mongo/driver/session/server_session.go | 2 +-
 .../x/mongo/driver/topology/connection.go | 3 +
 .../driver/topology/connection_legacy.go | 6 +
 .../driver/topology/connection_options.go | 6 +
 .../x/mongo/driver/topology/errors.go | 6 +
 .../x/mongo/driver/topology/fsm.go | 2 +-
 .../driver/topology/hanging_tls_conn_1_16.go | 37 -
 .../driver/topology/hanging_tls_conn_1_17.go | 44 -
 .../x/mongo/driver/topology/pool.go | 6 +
 .../x/mongo/driver/topology/rtt_monitor.go | 137 +-
 .../x/mongo/driver/topology/server.go | 7 +-
 .../x/mongo/driver/topology/server_options.go | 1 -
 .../x/mongo/driver/topology/topology.go | 5 +-
 .../x/mongo/driver/wiremessage/wiremessage.go | 22 +
 vendor/golang.org/x/sys/unix/zerrors_linux.go | 349 ++
 vendor/golang.org/x/sys/unix/ztypes_linux.go | 5 +
 vendor/golang.org/x/term/term.go | 10 +-
 vendor/golang.org/x/term/terminal.go | 2 +-
 .../natefinch/lumberjack.v2/.gitignore | 23 +
 .../natefinch/lumberjack.v2/.travis.yml | 6 +
 .../natefinch/lumberjack.v2/LICENSE} | 4 +-
 .../natefinch/lumberjack.v2/README.md | 179 +
 .../gopkg.in/natefinch/lumberjack.v2/chown.go | 11 +
 .../natefinch/lumberjack.v2/chown_linux.go | 19 +
 .../natefinch/lumberjack.v2/lumberjack.go | 541 +++
 vendor/gorm.io/driver/sqlite/ddlmod.go | 29 +-
 vendor/gorm.io/driver/sqlite/migrator.go | 9 +-
 vendor/gorm.io/driver/sqlite/sqlite.go | 6 +-
 vendor/gorm.io/driver/sqlserver/migrator.go | 137 +-
 vendor/k8s.io/klog/v2/README.md | 1 -
 vendor/k8s.io/klog/v2/contextual.go | 38 +-
 vendor/k8s.io/klog/v2/internal/dbg/dbg.go | 42 +
 .../klog/v2/internal/serialize/keyvalues.go | 120 +-
 vendor/k8s.io/klog/v2/k8s_references.go | 64 +
 vendor/k8s.io/klog/v2/klog.go | 300 +-
 vendor/k8s.io/klog/v2/klogr.go | 8 +-
 .../k8s.io/kube-openapi/pkg/common/common.go | 2 +-
 .../pkg/validation/spec/gnostic.go | 1515 +++++++
 vendor/modernc.org/fileutil/AUTHORS | 2 +
 vendor/modernc.org/fileutil/CONTRIBUTORS | 3 +
 vendor/modernc.org/fileutil/Makefile | 45 +-
 vendor/modernc.org/fileutil/fileutil.go | 135 +
 .../{fileutil_arm.go => fileutil_allarms.go} | 6 +
 .../modernc.org/fileutil/fileutil_darwin.go | 3 +-
 .../fileutil/fileutil_dragonfly.go | 3 +-
 .../modernc.org/fileutil/fileutil_freebsd.go | 3 +-
 vendor/modernc.org/fileutil/fileutil_linux.go | 3 +-
 .../modernc.org/fileutil/fileutil_netbsd.go | 3 +-
 .../modernc.org/fileutil/fileutil_solaris.go | 1 +
 vendor/modernc.org/internal/file/Makefile | 29 +
 vendor/modernc.org/internal/file/file.go | 15 +-
 vendor/modernc.org/libc/printf.go | 20 +-
 vendor/modules.txt | 149 +-
 .../internal/golang/encoding/json/decode.go | 50 +-
 .../internal/golang/encoding/json/encode.go | 34 +-
 .../internal/golang/encoding/json/fuzz.go | 9 +-
 .../golang/encoding/json/kubernetes_patch.go | 49 +-
 .../internal/golang/encoding/json/scanner.go | 2 +-
 .../internal/golang/encoding/json/stream.go | 8 +-
 .../internal/golang/encoding/json/tags.go | 16 +-
 vendor/sigs.k8s.io/json/json.go | 11 +
 vendor/sigs.k8s.io/yaml/.gitignore | 4 +
 vendor/sigs.k8s.io/yaml/.travis.yml | 7 +-
 vendor/sigs.k8s.io/yaml/README.md | 2 +-
 444 files changed, 41488 insertions(+), 2968 deletions(-)
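
NOTE: the four new utils/dorm/yiigo*.go helpers wrap github.com/shenghui0779/yiigo
(vendored below) for MySQL, MongoDB and Redis access. Their contents are not
reproduced in this summary, so the sketch below only shows the general shape such
a wrapper can take, built on the also-added jmoiron/sqlx; the type and constructor
names here are assumptions, not the actual code:

    // Hypothetical sketch of a utils/dorm MySQL helper; not the patched file.
    package dorm

    import (
        _ "github.com/go-sql-driver/mysql" // register the MySQL driver
        "github.com/jmoiron/sqlx"          // added to go.mod by this patch
    )

    // YiigoMysqlClient (assumed name) wraps the sqlx handle that yiigo manages.
    type YiigoMysqlClient struct {
        Db *sqlx.DB
    }

    // NewYiigoMysqlClient opens a MySQL connection and verifies it with a ping.
    func NewYiigoMysqlClient(dsn string) (*YiigoMysqlClient, error) {
        db, err := sqlx.Connect("mysql", dsn) // Connect = Open + Ping
        if err != nil {
            return nil, err
        }
        return &YiigoMysqlClient{Db: db}, nil
    }
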
 create mode 100644 utils/dorm/yiigo.go
 create mode 100644 utils/dorm/yiigo_mongodb.go
 create mode 100644 utils/dorm/yiigo_mysql.go
 create mode 100644 utils/dorm/yiigo_redis.go
 create mode 100644 vendor/entgo.io/ent/dialect/dialect.go
 create mode 100644 vendor/entgo.io/ent/dialect/sql/builder.go
 create mode 100644 vendor/entgo.io/ent/dialect/sql/driver.go
 create mode 100644 vendor/entgo.io/ent/dialect/sql/scan.go
 delete mode 100644 vendor/github.com/PuerkitoBio/purell/.gitignore
 delete mode 100644 vendor/github.com/PuerkitoBio/purell/.travis.yml
 delete mode 100644 vendor/github.com/PuerkitoBio/purell/LICENSE
 delete mode 100644 vendor/github.com/PuerkitoBio/purell/README.md
 delete mode 100644 vendor/github.com/PuerkitoBio/purell/purell.go
 delete mode 100644 vendor/github.com/PuerkitoBio/urlesc/.travis.yml
 delete mode 100644 vendor/github.com/PuerkitoBio/urlesc/README.md
 delete mode 100644 vendor/github.com/PuerkitoBio/urlesc/urlesc.go
 create mode 100644 vendor/github.com/edsrzf/mmap-go/mmap_plan9.go
 delete mode 100644 vendor/github.com/emicklei/go-restful/.travis.yml
 delete mode 100644 vendor/github.com/emicklei/go-restful/Makefile
 rename vendor/github.com/emicklei/go-restful/{ => v3}/.gitignore (99%)
 create mode 100644 vendor/github.com/emicklei/go-restful/v3/.goconvey
 create mode 100644 vendor/github.com/emicklei/go-restful/v3/.travis.yml
 rename vendor/github.com/emicklei/go-restful/{ => v3}/CHANGES.md (74%)
 rename vendor/github.com/emicklei/go-restful/{ => v3}/LICENSE (100%)
 create mode 100644 vendor/github.com/emicklei/go-restful/v3/Makefile
 rename vendor/github.com/emicklei/go-restful/{ => v3}/README.md (76%)
 create mode 100644 vendor/github.com/emicklei/go-restful/v3/SECURITY.md
 rename vendor/github.com/emicklei/go-restful/{ => v3}/Srcfile (100%)
 rename vendor/github.com/emicklei/go-restful/{ => v3}/compress.go (92%)
 rename vendor/github.com/emicklei/go-restful/{ => v3}/compressor_cache.go (100%)
 rename vendor/github.com/emicklei/go-restful/{ => v3}/compressor_pools.go (100%)
 rename vendor/github.com/emicklei/go-restful/{ => v3}/compressors.go (100%)
 rename vendor/github.com/emicklei/go-restful/{ => v3}/constants.go (100%)
 rename vendor/github.com/emicklei/go-restful/{ => v3}/container.go (78%)
 rename vendor/github.com/emicklei/go-restful/{ => v3}/cors_filter.go (81%)
 rename vendor/github.com/emicklei/go-restful/{ => v3}/curly.go (93%)
 rename vendor/github.com/emicklei/go-restful/{ => v3}/curly_route.go (100%)
 create mode 100644 vendor/github.com/emicklei/go-restful/v3/custom_verb.go
 rename vendor/github.com/emicklei/go-restful/{ => v3}/doc.go (95%)
 rename vendor/github.com/emicklei/go-restful/{ => v3}/entity_accessors.go (100%)
 create mode 100644 vendor/github.com/emicklei/go-restful/v3/extensions.go
 rename vendor/github.com/emicklei/go-restful/{ => v3}/filter.go (79%)
 rename vendor/github.com/emicklei/go-restful/{ => v3}/json.go (100%)
 rename vendor/github.com/emicklei/go-restful/{ => v3}/jsoniter.go (100%)
 rename vendor/github.com/emicklei/go-restful/{ => v3}/jsr311.go (89%)
 rename vendor/github.com/emicklei/go-restful/{ => v3}/log/log.go (100%)
 rename vendor/github.com/emicklei/go-restful/{ => v3}/logger.go (95%)
 rename vendor/github.com/emicklei/go-restful/{ => v3}/mime.go (100%)
 rename vendor/github.com/emicklei/go-restful/{ => v3}/options_filter.go (100%)
 rename vendor/github.com/emicklei/go-restful/{ => v3}/parameter.go (58%)
 rename vendor/github.com/emicklei/go-restful/{ => v3}/path_expression.go (100%)
 rename vendor/github.com/emicklei/go-restful/{ => v3}/path_processor.go (79%)
 rename vendor/github.com/emicklei/go-restful/{ => v3}/request.go (85%)
 rename vendor/github.com/emicklei/go-restful/{ => v3}/response.go (96%)
 rename vendor/github.com/emicklei/go-restful/{ => v3}/route.go (84%)
 rename vendor/github.com/emicklei/go-restful/{ => v3}/route_builder.go (75%)
 create mode 100644 vendor/github.com/emicklei/go-restful/v3/route_reader.go
 rename vendor/github.com/emicklei/go-restful/{ => v3}/router.go (100%)
 rename vendor/github.com/emicklei/go-restful/{ => v3}/service_error.go (70%)
 rename vendor/github.com/emicklei/go-restful/{ => v3}/web_service.go (93%)
 rename vendor/github.com/emicklei/go-restful/{ => v3}/web_service_container.go (100%)
 create mode 100644 vendor/github.com/fsnotify/fsnotify/.editorconfig
 create mode 100644 vendor/github.com/fsnotify/fsnotify/.gitattributes
 create mode 100644 vendor/github.com/fsnotify/fsnotify/.gitignore
 create mode 100644 vendor/github.com/fsnotify/fsnotify/.mailmap
 create mode 100644 vendor/github.com/fsnotify/fsnotify/AUTHORS
 create mode 100644 vendor/github.com/fsnotify/fsnotify/CHANGELOG.md
 create mode 100644 vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md
 rename vendor/github.com/{PuerkitoBio/urlesc => fsnotify/fsnotify}/LICENSE (95%)
 create mode 100644 vendor/github.com/fsnotify/fsnotify/README.md
 create mode 100644 vendor/github.com/fsnotify/fsnotify/fen.go
 create mode 100644 vendor/github.com/fsnotify/fsnotify/fsnotify.go
 create mode 100644 vendor/github.com/fsnotify/fsnotify/fsnotify_unsupported.go
 create mode 100644 vendor/github.com/fsnotify/fsnotify/inotify.go
 create mode 100644 vendor/github.com/fsnotify/fsnotify/inotify_poller.go
 create mode 100644 vendor/github.com/fsnotify/fsnotify/kqueue.go
 create mode 100644 vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go
 create mode 100644 vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go
 create mode 100644 vendor/github.com/fsnotify/fsnotify/windows.go
 create mode 100644 vendor/github.com/go-openapi/jsonreference/internal/normalize_url.go
 create mode 100644 vendor/github.com/go-openapi/swag/.gitattributes
 delete mode 100644 vendor/github.com/go-openapi/swag/.travis.yml
 create mode 100644 vendor/github.com/go-openapi/swag/file.go
 delete mode 100644 vendor/github.com/go-stack/stack/README.md
 delete mode 100644 vendor/github.com/go-stack/stack/stack.go
 create mode 100644 vendor/github.com/google/gnostic/openapiv3/annotations.pb.go
 create mode 100644 vendor/github.com/google/gnostic/openapiv3/annotations.proto
 create mode 100644 vendor/github.com/google/gofuzz/bytesource/bytesource.go
 create mode 100644 vendor/github.com/gorilla/websocket/.gitignore
 create mode 100644 vendor/github.com/gorilla/websocket/AUTHORS
 create mode 100644 vendor/github.com/gorilla/websocket/LICENSE
 create mode 100644 vendor/github.com/gorilla/websocket/README.md
 create mode 100644 vendor/github.com/gorilla/websocket/client.go
 create mode 100644 vendor/github.com/gorilla/websocket/compression.go
 create mode 100644 vendor/github.com/gorilla/websocket/conn.go
 create mode 100644 vendor/github.com/gorilla/websocket/doc.go
 create mode 100644 vendor/github.com/gorilla/websocket/join.go
 create mode 100644 vendor/github.com/gorilla/websocket/json.go
 create mode 100644 vendor/github.com/gorilla/websocket/mask.go
 create mode 100644 vendor/github.com/gorilla/websocket/mask_safe.go
 create mode 100644 vendor/github.com/gorilla/websocket/prepared.go
 create mode 100644 vendor/github.com/gorilla/websocket/proxy.go
 create mode 100644 vendor/github.com/gorilla/websocket/server.go
 create mode 100644 vendor/github.com/gorilla/websocket/tls_handshake.go
 create mode 100644 vendor/github.com/gorilla/websocket/tls_handshake_116.go
 create mode 100644 vendor/github.com/gorilla/websocket/util.go
 create mode 100644 vendor/github.com/gorilla/websocket/x_net_proxy.go
 create mode 100644 vendor/github.com/hashicorp/go-version/CHANGELOG.md
 create mode 100644 vendor/github.com/hashicorp/go-version/LICENSE
 create mode 100644 vendor/github.com/hashicorp/go-version/README.md
 create mode 100644 vendor/github.com/hashicorp/go-version/constraint.go
 create mode 100644 vendor/github.com/hashicorp/go-version/version.go
 create mode 100644 vendor/github.com/hashicorp/go-version/version_collection.go
 create mode 100644 vendor/github.com/imdario/mergo/.deepsource.toml
 create mode 100644 vendor/github.com/jmoiron/sqlx/.gitignore
 create mode 100644 vendor/github.com/jmoiron/sqlx/.travis.yml
 create mode 100644 vendor/github.com/jmoiron/sqlx/LICENSE
 create mode 100644 vendor/github.com/jmoiron/sqlx/README.md
 create mode 100644 vendor/github.com/jmoiron/sqlx/bind.go
 create mode 100644 vendor/github.com/jmoiron/sqlx/doc.go
 create mode 100644 vendor/github.com/jmoiron/sqlx/named.go
 create mode 100644 vendor/github.com/jmoiron/sqlx/named_context.go
 create mode 100644 vendor/github.com/jmoiron/sqlx/reflectx/README.md
 create mode 100644 vendor/github.com/jmoiron/sqlx/reflectx/reflect.go
 create mode 100644 vendor/github.com/jmoiron/sqlx/sqlx.go
 create mode 100644 vendor/github.com/jmoiron/sqlx/sqlx_context.go
 create mode 100644 vendor/github.com/joho/godotenv/.gitignore
 create mode 100644 vendor/github.com/joho/godotenv/LICENCE
 create mode 100644 vendor/github.com/joho/godotenv/README.md
 create mode 100644 vendor/github.com/joho/godotenv/godotenv.go
 create mode 100644 vendor/github.com/joho/godotenv/renovate.json
 delete mode 100644 vendor/github.com/mattn/go-colorable/.travis.yml
 create mode 100644 vendor/github.com/montanaflynn/stats/.gitignore
 create mode 100644 vendor/github.com/montanaflynn/stats/.travis.yml
 create mode 100644 vendor/github.com/montanaflynn/stats/CHANGELOG.md
 create mode 100644 vendor/github.com/montanaflynn/stats/DOCUMENTATION.md
 create mode 100644 vendor/github.com/montanaflynn/stats/LICENSE
 create mode 100644 vendor/github.com/montanaflynn/stats/Makefile
 create mode 100644 vendor/github.com/montanaflynn/stats/README.md
 create mode 100644 vendor/github.com/montanaflynn/stats/correlation.go
 create mode 100644 vendor/github.com/montanaflynn/stats/cumulative_sum.go
 create mode 100644 vendor/github.com/montanaflynn/stats/data.go
 create mode 100644 vendor/github.com/montanaflynn/stats/deviation.go
 create mode 100644 vendor/github.com/montanaflynn/stats/distances.go
 create mode 100644 vendor/github.com/montanaflynn/stats/doc.go
 create mode 100644 vendor/github.com/montanaflynn/stats/entropy.go
 create mode 100644 vendor/github.com/montanaflynn/stats/errors.go
 create mode 100644 vendor/github.com/montanaflynn/stats/legacy.go
 create mode 100644 vendor/github.com/montanaflynn/stats/load.go
 create mode 100644 vendor/github.com/montanaflynn/stats/max.go
 create mode 100644 vendor/github.com/montanaflynn/stats/mean.go
 create mode 100644 vendor/github.com/montanaflynn/stats/median.go
 create mode 100644 vendor/github.com/montanaflynn/stats/min.go
 create mode 100644 vendor/github.com/montanaflynn/stats/mode.go
 create mode 100644 vendor/github.com/montanaflynn/stats/norm.go
 create mode 100644 vendor/github.com/montanaflynn/stats/outlier.go
 create mode 100644 vendor/github.com/montanaflynn/stats/percentile.go
 create mode 100644 vendor/github.com/montanaflynn/stats/quartile.go
 create mode 100644 vendor/github.com/montanaflynn/stats/ranksum.go
 create mode 100644 vendor/github.com/montanaflynn/stats/regression.go
 create mode 100644 vendor/github.com/montanaflynn/stats/round.go
 create mode 100644 vendor/github.com/montanaflynn/stats/sample.go
 create mode 100644 vendor/github.com/montanaflynn/stats/sigmoid.go
 create mode 100644 vendor/github.com/montanaflynn/stats/softmax.go
 create mode 100644 vendor/github.com/montanaflynn/stats/sum.go
 create mode 100644 vendor/github.com/montanaflynn/stats/util.go
 create mode 100644 vendor/github.com/montanaflynn/stats/variance.go
 create mode 100644 vendor/github.com/nsqio/go-nsq/AUTHORS
 create mode 100644 vendor/github.com/nsqio/go-nsq/ChangeLog.md
 create mode 100644 vendor/github.com/nsqio/go-nsq/LICENSE
 create mode 100644 vendor/github.com/nsqio/go-nsq/README.md
 create mode 100644 vendor/github.com/nsqio/go-nsq/UPGRADING.md
 create mode 100644 vendor/github.com/nsqio/go-nsq/api_request.go
 create mode 100644 vendor/github.com/nsqio/go-nsq/command.go
 create mode 100644 vendor/github.com/nsqio/go-nsq/config.go
 create mode 100644 vendor/github.com/nsqio/go-nsq/config_flag.go
 create mode 100644 vendor/github.com/nsqio/go-nsq/conn.go
 create mode 100644 vendor/github.com/nsqio/go-nsq/consumer.go
 create mode 100644 vendor/github.com/nsqio/go-nsq/delegates.go
 create mode 100644 vendor/github.com/nsqio/go-nsq/doc.go
 create mode 100644 vendor/github.com/nsqio/go-nsq/errors.go
 create mode 100644 vendor/github.com/nsqio/go-nsq/message.go
 create mode 100644 vendor/github.com/nsqio/go-nsq/producer.go
 create mode 100644 vendor/github.com/nsqio/go-nsq/protocol.go
 create mode 100644 vendor/github.com/nsqio/go-nsq/states.go
 create mode 100644 vendor/github.com/nsqio/go-nsq/version.go
 create mode 100644 vendor/github.com/shenghui0779/vitess_pool/.gitignore
 create mode 100644 vendor/github.com/shenghui0779/vitess_pool/LICENSE
 create mode 100644 vendor/github.com/shenghui0779/vitess_pool/README.md
 create mode 100644 vendor/github.com/shenghui0779/vitess_pool/atomic.go
 create mode 100644 vendor/github.com/shenghui0779/vitess_pool/doc.go
 create mode 100644 vendor/github.com/shenghui0779/vitess_pool/pool.go
 create mode 100644 vendor/github.com/shenghui0779/vitess_pool/semaphore.go
 create mode 100644 vendor/github.com/shenghui0779/vitess_pool/timer.go
 create mode 100644 vendor/github.com/shenghui0779/yiigo/.gitignore
 create mode 100644 vendor/github.com/shenghui0779/yiigo/LICENSE
 create mode 100644 vendor/github.com/shenghui0779/yiigo/README.md
 create mode 100644 vendor/github.com/shenghui0779/yiigo/crypto.go
 create mode 100644 vendor/github.com/shenghui0779/yiigo/db.go
 create mode 100644 vendor/github.com/shenghui0779/yiigo/env.go
 create mode 100644 vendor/github.com/shenghui0779/yiigo/form.go
 create mode 100644 vendor/github.com/shenghui0779/yiigo/grpc.go
 create mode 100644 vendor/github.com/shenghui0779/yiigo/hash.go
 create mode 100644 vendor/github.com/shenghui0779/yiigo/helper.go
 create mode 100644 vendor/github.com/shenghui0779/yiigo/http.go
 create mode 100644 vendor/github.com/shenghui0779/yiigo/init.go
 create mode 100644 vendor/github.com/shenghui0779/yiigo/location.go
 create mode 100644 vendor/github.com/shenghui0779/yiigo/logger.go
 create mode 100644 vendor/github.com/shenghui0779/yiigo/mongo.go
 create mode 100644 vendor/github.com/shenghui0779/yiigo/mutex.go
 create mode 100644 vendor/github.com/shenghui0779/yiigo/nsq.go
 create mode 100644 vendor/github.com/shenghui0779/yiigo/redis.go
 create mode 100644 vendor/github.com/shenghui0779/yiigo/sql_builder.go
 create mode 100644 vendor/github.com/shenghui0779/yiigo/ssh.go
 create mode 100644 vendor/github.com/shenghui0779/yiigo/timingwheel.go
 create mode 100644 vendor/github.com/shenghui0779/yiigo/validator.go
 create mode 100644 vendor/github.com/shenghui0779/yiigo/websocket.go
 create mode 100644 vendor/github.com/tidwall/pretty/LICENSE
 create mode 100644 vendor/github.com/tidwall/pretty/README.md
 create mode 100644 vendor/github.com/tidwall/pretty/pretty.go
 create mode 100644 vendor/go.mongodb.org/mongo-driver/internal/csfle_util.go
 create mode 100644 vendor/go.mongodb.org/mongo-driver/internal/csot_util.go
 create mode 100644 vendor/go.mongodb.org/mongo-driver/internal/randutil/rand/bits.go
 create mode 100644 vendor/go.mongodb.org/mongo-driver/internal/randutil/rand/exp.go
 create mode 100644 vendor/go.mongodb.org/mongo-driver/internal/randutil/rand/normal.go
 create mode 100644 vendor/go.mongodb.org/mongo-driver/internal/randutil/rand/rand.go
 create mode 100644 vendor/go.mongodb.org/mongo-driver/internal/randutil/rand/rng.go
 rename vendor/go.mongodb.org/mongo-driver/{x/mongo/driver => internal}/uuid/uuid.go (54%)
 create mode 100644 vendor/go.mongodb.org/mongo-driver/mongo/options/rewrapdatakeyoptions.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/hanging_tls_conn_1_16.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/hanging_tls_conn_1_17.go
 create mode 100644 vendor/gopkg.in/natefinch/lumberjack.v2/.gitignore
 create mode 100644 vendor/gopkg.in/natefinch/lumberjack.v2/.travis.yml
 rename vendor/{github.com/go-stack/stack/LICENSE.md => gopkg.in/natefinch/lumberjack.v2/LICENSE} (96%)
 create mode 100644 vendor/gopkg.in/natefinch/lumberjack.v2/README.md
 create mode 100644 vendor/gopkg.in/natefinch/lumberjack.v2/chown.go
 create mode 100644 vendor/gopkg.in/natefinch/lumberjack.v2/chown_linux.go
 create mode 100644 vendor/gopkg.in/natefinch/lumberjack.v2/lumberjack.go
 create mode 100644 vendor/k8s.io/klog/v2/internal/dbg/dbg.go
 create mode 100644 vendor/k8s.io/kube-openapi/pkg/validation/spec/gnostic.go
 rename vendor/modernc.org/fileutil/{fileutil_arm.go => fileutil_allarms.go} (85%)
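
The go.mod hunk below bumps go.mongodb.org/mongo-driver from v1.9.1 to v1.10.0,
the driver behind utils/dorm/yiigo_mongodb.go. A minimal connectivity check
against the v1.10 client API, for orientation only (not taken from this patch):

    package main

    import (
        "context"
        "log"
        "time"

        "go.mongodb.org/mongo-driver/mongo"
        "go.mongodb.org/mongo-driver/mongo/options"
    )

    func main() {
        ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
        defer cancel()

        // Connect, then verify the deployment is actually reachable.
        client, err := mongo.Connect(ctx, options.Client().ApplyURI("mongodb://127.0.0.1:27017"))
        if err != nil {
            log.Fatal(err)
        }
        defer client.Disconnect(ctx)

        if err := client.Ping(ctx, nil); err != nil {
            log.Fatal(err)
        }
    }
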
diff --git a/go.mod b/go.mod
index b7e57c3c..9a83d8df 100644
--- a/go.mod
+++ b/go.mod
@@ -8,14 +8,14 @@ require (
 	gitee.com/chunanyong/zorm v1.5.6
 	github.com/aliyun/aliyun-oss-go-sdk v2.2.4+incompatible
 	github.com/allegro/bigcache/v3 v3.0.2
-	github.com/aws/aws-sdk-go v1.44.54
-	github.com/baidubce/bce-sdk-go v0.9.129
+	github.com/aws/aws-sdk-go v1.44.57
+	github.com/baidubce/bce-sdk-go v0.9.130
 	github.com/basgys/goxml2json v1.1.0
 	github.com/beego/beego/v2 v2.0.4
 	github.com/bmizerany/pq v0.0.0-20131128184720-da2b95e392c1
 	github.com/bradfitz/gomemcache v0.0.0-20220106215444-fb4bf637b56d
-	github.com/daodao97/fly v0.0.0-20220716071342-fd98e4b05d96
-	github.com/denisenkom/go-mssqldb v0.12.0
+	github.com/daodao97/fly v0.0.0-20220718020319-cee8ed04d19a
+	github.com/denisenkom/go-mssqldb v0.12.2
 	github.com/dgraph-io/ristretto v0.1.0
 	github.com/docker/docker v20.10.17+incompatible
 	github.com/drone/drone-go v1.7.1
@@ -33,10 +33,11 @@ require (
 	github.com/gomodule/redigo v2.0.0+incompatible
 	github.com/huaweicloud/huaweicloud-sdk-go-obs v3.21.12+incompatible
 	github.com/jasonlvhit/gocron v0.0.1
+	github.com/jmoiron/sqlx v1.3.5
 	github.com/kamva/mgm/v3 v3.4.1
 	github.com/ks3sdklib/aws-sdk-go v1.1.6
 	github.com/lesismal/sqlw v0.0.0-20220710073239-bd797c43fef9
-	github.com/lib/pq v1.10.5
+	github.com/lib/pq v1.10.6
 	github.com/lqs/sqlingo v0.11.1
 	github.com/mailru/go-clickhouse/v2 v2.0.0
 	github.com/mattn/go-sqlite3 v2.0.3+incompatible
@@ -50,6 +51,7 @@ require (
 	github.com/robfig/cron/v3 v3.0.1
 	github.com/rs/xid v1.4.0
 	github.com/saracen/go7z v0.0.0-20191010121135-9c09b6bd7fda
+	github.com/shenghui0779/yiigo v1.6.6
 	github.com/shopspring/decimal v1.3.1
 	github.com/sirupsen/logrus v1.8.1
 	github.com/tencentyun/cos-go-sdk-v5 v0.7.35
@@ -64,10 +66,10 @@ require (
 	github.com/upyun/go-sdk/v3 v3.0.2
 	go.etcd.io/etcd/api/v3 v3.5.4
 	go.etcd.io/etcd/client/v3 v3.5.4
-	go.mongodb.org/mongo-driver v1.9.1
+	go.mongodb.org/mongo-driver v1.10.0
 	go.uber.org/zap v1.21.0
 	golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d
-	golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8
+	golang.org/x/oauth2 v0.0.0-20220718184931-c8730f7fcb92
 	golang.org/x/text v0.3.7
 	google.golang.org/grpc v1.48.0
 	google.golang.org/protobuf v1.28.0
@@ -76,8 +78,8 @@ require (
 	gorm.io/datatypes v1.0.7
 	gorm.io/driver/mysql v1.3.5
 	gorm.io/driver/postgres v1.3.8
-	gorm.io/driver/sqlite v1.3.1
-	gorm.io/driver/sqlserver v1.3.1
+	gorm.io/driver/sqlite v1.3.6
+	gorm.io/driver/sqlserver v1.3.2
 	gorm.io/gorm v1.23.8
 	k8s.io/client-go v0.24.3
 	xorm.io/xorm v1.3.1
@@ -100,34 +102,36 @@ require (
 	github.com/docker/go-connections v0.4.0 // indirect
 	github.com/docker/go-units v0.4.0 // indirect
 	github.com/dustin/go-humanize v1.0.0 // indirect
-	github.com/edsrzf/mmap-go v1.0.0 // indirect
-	github.com/emicklei/go-restful v2.9.5+incompatible // indirect
+	github.com/edsrzf/mmap-go v1.1.0 // indirect
+	github.com/emicklei/go-restful v2.16.0+incompatible // indirect
+	github.com/emicklei/go-restful/v3 v3.8.0 // indirect
 	github.com/fatih/color v1.13.0 // indirect
-	github.com/fsnotify/fsnotify v1.5.1 // indirect
+	github.com/fsnotify/fsnotify v1.5.4 // indirect
 	github.com/gin-contrib/sse v0.1.0 // indirect
 	github.com/glebarez/go-sqlite v1.17.3 // indirect
 	github.com/go-logfmt/logfmt v0.5.1 // indirect
 	github.com/go-logr/logr v1.2.3 // indirect
 	github.com/go-openapi/jsonpointer v0.19.5 // indirect
-	github.com/go-openapi/jsonreference v0.19.5 // indirect
-	github.com/go-openapi/swag v0.19.14 // indirect
+	github.com/go-openapi/jsonreference v0.20.0 // indirect
+	github.com/go-openapi/swag v0.21.1 // indirect
 	github.com/go-rel/sql v0.11.0 // indirect
-	github.com/go-stack/stack v1.8.1 // indirect
-	github.com/goccy/go-json v0.9.8 // indirect
+	github.com/goccy/go-json v0.9.10 // indirect
 	github.com/godror/knownpb v0.1.0 // indirect
 	github.com/gofrs/uuid v4.2.0+incompatible // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
 	github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9 // indirect
-	github.com/golang-sql/sqlexp v0.0.0-20170517235910-f1bb20e5a188 // indirect
+	github.com/golang-sql/sqlexp v0.1.0 // indirect
 	github.com/golang/glog v1.0.0 // indirect
 	github.com/golang/protobuf v1.5.2 // indirect
 	github.com/golang/snappy v0.0.4 // indirect
-	github.com/google/gnostic v0.5.7-v3refs // indirect
+	github.com/google/gnostic v0.6.9 // indirect
 	github.com/google/go-querystring v1.1.0 // indirect
-	github.com/google/gofuzz v1.1.0 // indirect
+	github.com/google/gofuzz v1.2.0 // indirect
 	github.com/google/uuid v1.3.0 // indirect
+	github.com/gorilla/websocket v1.5.0 // indirect
+	github.com/hashicorp/go-version v1.6.0 // indirect
 	github.com/hashicorp/golang-lru v0.5.4 // indirect
-	github.com/imdario/mergo v0.3.5 // indirect
+	github.com/imdario/mergo v0.3.13 // indirect
 	github.com/jackc/chunkreader/v2 v2.0.1 // indirect
 	github.com/jackc/pgconn v1.12.1 // indirect
 	github.com/jackc/pgio v1.0.0 // indirect
@@ -139,20 +143,23 @@ require (
 	github.com/jinzhu/inflection v1.0.0 // indirect
 	github.com/jinzhu/now v1.1.5 // indirect
 	github.com/jmespath/go-jmespath v0.4.0 // indirect
+	github.com/joho/godotenv v1.4.0 // indirect
 	github.com/josharian/intern v1.0.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
 	github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
 	github.com/klauspost/compress v1.15.8 // indirect
 	github.com/leodido/go-urn v1.2.1 // indirect
-	github.com/mailru/easyjson v0.7.6 // indirect
-	github.com/mattn/go-colorable v0.1.9 // indirect
+	github.com/mailru/easyjson v0.7.7 // indirect
+	github.com/mattn/go-colorable v0.1.12 // indirect
 	github.com/mattn/go-isatty v0.0.14 // indirect
 	github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
+	github.com/montanaflynn/stats v0.6.6 // indirect
 	github.com/morikuni/aec v1.0.0 // indirect
 	github.com/mozillazg/go-httpheader v0.3.1 // indirect
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
+	github.com/nsqio/go-nsq v1.1.0 // indirect
 	github.com/opencontainers/go-digest v1.0.0 // indirect
 	github.com/opencontainers/image-spec v1.0.2 // indirect
 	github.com/pelletier/go-toml/v2 v2.0.2 // indirect
@@ -162,11 +169,13 @@ require (
 	github.com/saracen/go7z-fixtures v0.0.0-20190623165746-aa6b8fba1d2f // indirect
 	github.com/saracen/solidblock v0.0.0-20190426153529-45df20abab6f // indirect
 	github.com/serenize/snaker v0.0.0-20201027110005-a7ad2135616e // indirect
+	github.com/shenghui0779/vitess_pool v1.0.1 // indirect
 	github.com/shiena/ansicolor v0.0.0-20200904210342-c7312218db18 // indirect
 	github.com/spf13/cast v1.5.0 // indirect
 	github.com/spf13/pflag v1.0.5 // indirect
 	github.com/stretchr/testify v1.8.0 // indirect
 	github.com/syndtr/goleveldb v1.0.0 // indirect
+	github.com/tidwall/pretty v1.2.0 // indirect
 	github.com/tmthrgd/go-hex v0.0.0-20190904060850-447a3041c3bc // indirect
 	github.com/ugorji/go/codec v1.2.7 // indirect
 	github.com/ulikunitz/xz v0.5.10 // indirect
@@ -182,12 +191,12 @@ require (
 	golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect
 	golang.org/x/net v0.0.0-20220708220712-1185a9018129 // indirect
 	golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f // indirect
-	golang.org/x/sys v0.0.0-20220712014510-0a85c31ab51e // indirect
-	golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect
+	golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8 // indirect
+	golang.org/x/term v0.0.0-20220526004731-065cf7ba2467 // indirect
 	golang.org/x/time v0.0.0-20220609170525-579cf78fd858 // indirect
 	golang.org/x/tools v0.1.11 // indirect
 	google.golang.org/appengine v1.6.7 // indirect
-	google.golang.org/genproto v0.0.0-20220713161829-9c7dac0a6568 // indirect
+	google.golang.org/genproto v0.0.0-20220718134204-073382fd740c // indirect
 	gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
 	gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect
@@ -196,33 +205,33 @@ require (
 	gotest.tools/v3 v3.3.0 // indirect
 	k8s.io/api v0.24.3 // indirect
 	k8s.io/apimachinery v0.24.3 // indirect
-	k8s.io/klog/v2 v2.60.1 // indirect
-	k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42 // indirect
-	k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 // indirect
+	k8s.io/klog/v2 v2.70.1 // indirect
+	k8s.io/kube-openapi v0.0.0-20220627174259-011e075b9cb8 // indirect
+	k8s.io/utils v0.0.0-20220713171938-56c0de1e6f5e // indirect
 	lukechampine.com/uint128 v1.2.0 // indirect
 	mellium.im/sasl v0.2.1 // indirect
 	modernc.org/b v1.0.2 // indirect
 	modernc.org/cc/v3 v3.36.0 // indirect
 	modernc.org/ccgo/v3 v3.16.7 // indirect
-	modernc.org/db v1.0.3 // indirect
+	modernc.org/db v1.0.4 // indirect
 	modernc.org/file v1.0.3 // indirect
-	modernc.org/fileutil v1.0.0 // indirect
+	modernc.org/fileutil v1.1.0 // indirect
 	modernc.org/golex v1.0.1 // indirect
-	modernc.org/internal v1.0.2 // indirect
-	modernc.org/libc v1.16.14 // indirect
-	modernc.org/lldb v1.0.2 // indirect
+	modernc.org/internal v1.0.4 // indirect
+	modernc.org/libc v1.16.17 // indirect
+	modernc.org/lldb v1.0.4 // indirect
 	modernc.org/mathutil v1.4.1 // indirect
 	modernc.org/memory v1.1.1 // indirect
 	modernc.org/opt v0.1.3 // indirect
-	modernc.org/ql v1.4.0 // indirect
+	modernc.org/ql v1.4.1 // indirect
 	modernc.org/sortutil v1.1.0 // indirect
 	modernc.org/sqlite v1.17.3 // indirect
 	modernc.org/strutil v1.1.2 // indirect
 	modernc.org/token v1.0.0 // indirect
-	modernc.org/zappy v1.0.3 // indirect
+	modernc.org/zappy v1.0.5 // indirect
 	muzzammil.xyz/jsonc v1.0.0 // indirect
-	sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 // indirect
+	sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect
 	sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect
-	sigs.k8s.io/yaml v1.2.0 // indirect
+	sigs.k8s.io/yaml v1.3.0 // indirect
 	xorm.io/builder v0.3.12 // indirect
 )
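
For Redis, the require block above keeps github.com/gomodule/redigo and pulls in
github.com/shenghui0779/vitess_pool, which yiigo appears to use to pool redigo
connections; utils/dorm/yiigo_redis.go presumably builds on that. A bare redigo
round trip, again illustrative rather than the patched code:

    package main

    import (
        "fmt"
        "log"

        "github.com/gomodule/redigo/redis"
    )

    func main() {
        conn, err := redis.Dial("tcp", "127.0.0.1:6379")
        if err != nil {
            log.Fatal(err)
        }
        defer conn.Close()

        // SET then GET a key to prove the connection works end to end.
        if _, err := conn.Do("SET", "dorm:ping", "pong"); err != nil {
            log.Fatal(err)
        }
        v, err := redis.String(conn.Do("GET", "dorm:ping"))
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(v) // pong
    }
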
diff --git a/go.sum b/go.sum
index e88177b1..7da9d543 100644
--- a/go.sum
+++ b/go.sum
@@ -61,6 +61,7 @@ github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBp
 github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
+github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60=
 github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
 github.com/KscSDK/ksc-sdk-go v0.1.42/go.mod h1:isHlJZi429ff5JLemSc10h7nznNgzJAY4MmNM8u7SBo=
 github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc=
@@ -68,6 +69,7 @@ github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0
 github.com/Microsoft/go-winio v0.5.2 h1:a9IhgEQBCUEk6QCdml9CiJGhAws+YwffDHEMp1VMrpA=
 github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY=
 github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
+github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
 github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI=
 github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
 github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M=
@@ -101,9 +103,13 @@ github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN
 github.com/aws/aws-sdk-go v1.42.27/go.mod h1:OGr6lGMAKGlG9CVrYnWYDKIyb829c6EVBRjxqjmPepc=
 github.com/aws/aws-sdk-go v1.44.54 h1:zfFptZ0iLuk+psnRLk+o0NMgd/pBsALlNE7i40iOmFA=
 github.com/aws/aws-sdk-go v1.44.54/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
+github.com/aws/aws-sdk-go v1.44.57 h1:Dx1QD+cA89LE0fVQWSov22tpnTa0znq2Feyaa/myVjg=
+github.com/aws/aws-sdk-go v1.44.57/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
 github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
 github.com/baidubce/bce-sdk-go v0.9.129 h1:Q11ZtNGNOLqnJXpkl5l8+pnuflsOejJs7hJ09zFqiYg=
 github.com/baidubce/bce-sdk-go v0.9.129/go.mod h1:zbYJMQwE4IZuyrJiFO8tO8NbtYiKTFTbwh4eIsqjVdg=
+github.com/baidubce/bce-sdk-go v0.9.130 h1:5FYIocNnGKl72/+Ei+xyScl3Asmw7O+H9q8bv/H/ttI=
+github.com/baidubce/bce-sdk-go v0.9.130/go.mod h1:zbYJMQwE4IZuyrJiFO8tO8NbtYiKTFTbwh4eIsqjVdg=
 github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f h1:ZNv7On9kyUzm7fvRZumSyy/IUiSC7AzL0I1jKKtwooA=
 github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc=
 github.com/basgys/goxml2json v1.1.0 h1:4ln5i4rseYfXNd86lGEB+Vi652IsIXIvggKM/BhUKVw=
@@ -122,9 +128,12 @@ github.com/bmizerany/pq v0.0.0-20131128184720-da2b95e392c1 h1:1clOQIolnXGoH1SUo8
 github.com/bmizerany/pq v0.0.0-20131128184720-da2b95e392c1/go.mod h1:YR6v6TjYGQnPky7rSf5U+AiQ4+EHIVmFYbhHUPo5L2U=
 github.com/bradfitz/gomemcache v0.0.0-20220106215444-fb4bf637b56d h1:pVrfxiGfwelyab6n21ZBkbkmbevaf+WvMIiR7sr97hw=
 github.com/bradfitz/gomemcache v0.0.0-20220106215444-fb4bf637b56d/go.mod h1:H0wQNHz2YrLsuXOZozoeDmnHXkNCRmMW0gwFWDfEZDA=
+github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0=
 github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ=
 github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
 github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
+github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
 github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
 github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
@@ -139,6 +148,7 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX
 github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
 github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
 github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
+github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
 github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
 github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
 github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
@@ -161,6 +171,8 @@ github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ3
 github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
 github.com/daodao97/fly v0.0.0-20220716071342-fd98e4b05d96 h1:PBSD5k0Geakg2WvRkE1JsNpUx34nSEUSbBbqCXw/c5M=
 github.com/daodao97/fly v0.0.0-20220716071342-fd98e4b05d96/go.mod h1:y04a64MRtXHe/AwdIkk1CJL/bBlCy1NdZqzv8GKygB8=
+github.com/daodao97/fly v0.0.0-20220718020319-cee8ed04d19a h1:FfirfrBMiNkJCA66w2osBvPmDWz0Kah+Aet5uSEXNKI=
+github.com/daodao97/fly v0.0.0-20220718020319-cee8ed04d19a/go.mod h1:y04a64MRtXHe/AwdIkk1CJL/bBlCy1NdZqzv8GKygB8=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -170,6 +182,8 @@ github.com/denisenkom/go-mssqldb v0.10.0/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27
 github.com/denisenkom/go-mssqldb v0.11.0/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU=
 github.com/denisenkom/go-mssqldb v0.12.0 h1:VtrkII767ttSPNRfFekePK3sctr+joXgO58stqQbtUA=
 github.com/denisenkom/go-mssqldb v0.12.0/go.mod h1:iiK0YP1ZeepvmBQk/QpLEhhTNJgfzrpArPY/aFvc9yU=
+github.com/denisenkom/go-mssqldb v0.12.2 h1:1OcPn5GBIobjWNd+8yjfHNIaFX14B1pWI3F9HZy5KXw=
+github.com/denisenkom/go-mssqldb v0.12.2/go.mod h1:lnIw1mZukFRZDJYQ0Pb833QS2IaC3l5HkEfra2LJ+sk=
 github.com/dgraph-io/ristretto v0.1.0 h1:Jv3CGQHp9OjuMBSne1485aDpUkTKEcUqF+jm/LuerPI=
 github.com/dgraph-io/ristretto v0.1.0/go.mod h1:fux0lOrBhrVCJd3lcTHsIJhq1T2rokOu6v9Vcb3Q9ug=
 github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
@@ -197,10 +211,16 @@ github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1
 github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
 github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw=
 github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
+github.com/edsrzf/mmap-go v1.1.0 h1:6EUwBLQ/Mcr1EYLE4Tn1VdW1A4ckqCQWZBw8Hr0kjpQ=
+github.com/edsrzf/mmap-go v1.1.0/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8EIth78Q=
 github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
 github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
 github.com/emicklei/go-restful v2.9.5+incompatible h1:spTtZBk5DYEvbxMVutUuTyh1Ao2r4iyvLdACqsl/Ljk=
 github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
+github.com/emicklei/go-restful v2.16.0+incompatible h1:rgqiKNjTnFQA6kkhFe16D8epTksy9HQ1MyrbDXSdYhM=
+github.com/emicklei/go-restful v2.16.0+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
+github.com/emicklei/go-restful/v3 v3.8.0 h1:eCZ8ulSerjdAiaNpF7GxXIE7ZCMo1moN1qX+S609eVw=
+github.com/emicklei/go-restful/v3 v3.8.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
 github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g=
 github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
 github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
@@ -208,12 +228,14 @@ github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1m
 github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
 github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
 github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
 github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE=
 github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
 github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
 github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
 github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w=
 github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
+github.com/flowstack/go-jsonschema v0.1.1/go.mod h1:yL7fNggx1o8rm9RlgXv7hTBWxdBM0rVwpMwimd3F3N0=
 github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
 github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
 github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4=
@@ -221,8 +243,8 @@ github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2
 github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE=
 github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
 github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
-github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI=
-github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU=
+github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI=
+github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU=
 github.com/getkin/kin-openapi v0.76.0/go.mod h1:660oXbgy5JFMKreazJaQTw7o+X00qeSyhcnluiMv+Xg=
 github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
 github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE=
@@ -256,9 +278,13 @@ github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34
 github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
 github.com/go-openapi/jsonreference v0.19.5 h1:1WJP/wi4OjB4iV8KVbH73rQaoialJrqv8gitZLxGLtM=
 github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg=
+github.com/go-openapi/jsonreference v0.20.0 h1:MYlu0sBgChmCfJxxUKZ8g1cPWFOB37YSZqewK7OKeyA=
+github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo=
 github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
 github.com/go-openapi/swag v0.19.14 h1:gm3vOOXfiuw5i9p5N9xJvfjvuofpyvLA9Wr6QfK5Fng=
 github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
+github.com/go-openapi/swag v0.21.1 h1:wm0rhTb5z7qpJRHBdPOMuY4QjVUMbF6/kwoYeRAOrKU=
+github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
 github.com/go-playground/assert/v2 v2.0.1 h1:MsBgLAaY856+nPRTKrp3/OZK38U/wa0CcBYNjji3q3A=
 github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
 github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8=
@@ -288,8 +314,6 @@ github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG
 github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE=
 github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
 github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
-github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw=
-github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4=
 github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0=
 github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY=
 github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg=
@@ -317,6 +341,8 @@ github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY9
 github.com/goccy/go-json v0.8.1/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
 github.com/goccy/go-json v0.9.8 h1:DxXB6MLd6yyel7CLph8EwNIonUtVZd3Ue5iRcL4DQCE=
 github.com/goccy/go-json v0.9.8/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
+github.com/goccy/go-json v0.9.10 h1:hCeNmprSNLB8B8vQKWl6DpuH0t60oEs+TAk9a7CScKc=
+github.com/goccy/go-json v0.9.10/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
 github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
 github.com/godror/godror v0.33.3 h1:GyjbWV0cKGDTRe1KClB1c+bCKXpn27UlIjwyNZs1qpg=
 github.com/godror/godror v0.33.3/go.mod h1:FbrZ7po7LyS3gUTdW/K1+kIoePvP/E044HpwptL4vqw=
@@ -337,6 +363,8 @@ github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9 h1:au07oEsX2xN0kt
 github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0=
 github.com/golang-sql/sqlexp v0.0.0-20170517235910-f1bb20e5a188 h1:+eHOFJl1BaXrQxKX+T06f78590z4qA2ZzBTqahsKSE4=
 github.com/golang-sql/sqlexp v0.0.0-20170517235910-f1bb20e5a188/go.mod h1:vXjM/+wXQnTPR4KqTKDgJukSZ6amVRtWMPEjE6sQoK8=
+github.com/golang-sql/sqlexp v0.1.0 h1:ZCD6MBpcuOVfGVqsEmY5/4FtYiKz6tSyUv9LPEDei6A=
+github.com/golang-sql/sqlexp v0.1.0/go.mod h1:J4ad9Vo8ZCWQ2GMrC4UCQy1JpCbwU9m3EOqtpKwwwHI=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
 github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ=
 github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4=
@@ -383,6 +411,8 @@ github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ
 github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
 github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54=
 github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ=
+github.com/google/gnostic v0.6.9 h1:ZK/5VhkoX835RikCHpSUJV9a+S3e1zLh59YnyWeBW+0=
+github.com/google/gnostic v0.6.9/go.mod h1:Nm8234We1lq6iB9OmlgNv3nH91XLLVZHCDayfA3xq+E=
 github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
 github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
 github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
@@ -403,6 +433,8 @@ github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17
 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
 github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g=
 github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
+github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
 github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
 github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
 github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
@@ -433,6 +465,8 @@ github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2z
 github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
 github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
 github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
+github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
+github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
 github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
 github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
@@ -451,6 +485,8 @@ github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdv
 github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
 github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
 github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
+github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek=
+github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
 github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
 github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
 github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
@@ -468,10 +504,13 @@ github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:
 github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
 github.com/imdario/mergo v0.3.5 h1:JboBksRwiiAJWvIYJVo46AfV+IAIKZpfrSzVKj42R4Q=
 github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
+github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk=
+github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg=
 github.com/inconshreveable/mousetrap v1.0.0/go.mod
h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= github.com/ipfs/go-detect-race v0.0.1 h1:qX/xay2W3E4Q1U7d9lNs1sU9nvguX0a7319XbyQ6cOk= github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps= +github.com/jackc/chunkreader v1.0.0 h1:4s39bBR8ByfqH+DKm8rQA3E1LHZWB9XWcrz8fqaZbe0= github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo= github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8= @@ -497,6 +536,7 @@ github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65 h1:DadwsjnMwFjfWc9y5W github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65/go.mod h1:5R2h2EEX+qri8jOWMbJCtaPWkrrNc7OHwsp2TCqp7ak= github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= +github.com/jackc/pgproto3 v1.1.0 h1:FYYE4yRw+AgI8wXIinMlNjBbp/UitDJwfj5LqqewP1A= github.com/jackc/pgproto3 v1.1.0/go.mod h1:eR5FA3leWg7p9aeAqi37XOTgTIbkABlvcPB3E5rlc78= github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190420180111-c116219b62db/go.mod h1:bhq50y+xrl9n5mRYyCBFKkpRVTLYJVWeCc+mEAI3yXA= github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod h1:uH0AWtUmuShn0bcesswc4aBTWGvw0cAxIJp+6OB//Wg= @@ -553,7 +593,11 @@ github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9Y github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/jmoiron/sqlx v1.3.5 h1:vFFPA71p1o5gAeqtEAwLU4dnX2napprKtHr7PYIcN3g= +github.com/jmoiron/sqlx v1.3.5/go.mod h1:nRVWtLre0KfCLJvgxzCsLVMogSvQ1zNJtpYr2Ccp0mQ= github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= +github.com/joho/godotenv v1.4.0 h1:3l4+N6zfMWnkbPEXKng2o2/MR5mSwTrBih4ZEkkz1lg= +github.com/joho/godotenv v1.4.0/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= @@ -610,8 +654,8 @@ github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lib/pq v1.10.4/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= -github.com/lib/pq v1.10.5 h1:J+gdV2cUmX7ZqL2B0lFcW0m+egaHC2V3lpO8nWxyYiQ= -github.com/lib/pq v1.10.5/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/lib/pq v1.10.6 h1:jbk+ZieJ0D7EVGJYpL9QTz7/YW6UHbmdnZWYyK5cdBs= +github.com/lib/pq v1.10.6/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= 
github.com/lqs/sqlingo v0.11.1 h1:omkxWseD3kwr2nOI5SqB4NjHMbijD43R6Eni4GX2ekA= @@ -621,6 +665,8 @@ github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA= github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mailru/go-clickhouse/v2 v2.0.0 h1:O+ZGJDwp/E5W19ooeouEqaOlg+qxA+4Zsfjt63QcnVU= github.com/mailru/go-clickhouse/v2 v2.0.0/go.mod h1:TwxN829KnFZ7jAka9l9EoCV+U0CBFq83SFev4oLbnNU= github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= @@ -631,6 +677,8 @@ github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVc github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.9 h1:sqDoxXbdeALODt0DAeJCVp38ps9ZogZEAXjus69YV3U= github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40= +github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= @@ -641,6 +689,7 @@ github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Ky github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= github.com/mattn/go-sqlite3 v1.14.9/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= github.com/mattn/go-sqlite3 v1.14.12/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= github.com/mattn/go-sqlite3 v2.0.3+incompatible h1:gXHsfypPkaMZrKbD5209QV9jbUTJKjyR5WD3HYQSd+U= @@ -668,7 +717,10 @@ github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3Rllmb github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8= +github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe h1:iruDEfMl2E6fbMZ9s0scYfZQ84/6SPL6zC8ACM2oIL0= github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= +github.com/montanaflynn/stats v0.6.6 h1:Duep6KMIDpY4Yo11iFsvyqJDyfzLF9+sndUKT+v64GQ= +github.com/montanaflynn/stats v0.6.6/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow= github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/mozillazg/go-httpheader v0.2.1/go.mod 
h1:jJ8xECTlalr6ValeXYdOF8fFUISeBAdw6E61aqQma60= @@ -694,6 +746,8 @@ github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OS github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nilorg/sdk v0.0.0-20220617065147-3001fb840741 h1:oqg84OxQrU/bdn22BOceI5ehavqCY3GsRUyp74UM8Cw= github.com/nilorg/sdk v0.0.0-20220617065147-3001fb840741/go.mod h1:X1swpPdqguAZaBDoEPyEWHSsJii0YQ1o+3piMv6W3JU= +github.com/nsqio/go-nsq v1.1.0 h1:PQg+xxiUjA7V+TLdXw7nVrJ5Jbl3sN86EhGCQj4+FYE= +github.com/nsqio/go-nsq v1.1.0/go.mod h1:vKq36oyeVXgsS5Q8YEO7WghqidAVXQlcFxzQbQTuDEY= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= @@ -814,6 +868,10 @@ github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdh github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/serenize/snaker v0.0.0-20201027110005-a7ad2135616e h1:zWKUYT07mGmVBH+9UgnHXd/ekCK99C8EbDSAt5qsjXE= github.com/serenize/snaker v0.0.0-20201027110005-a7ad2135616e/go.mod h1:Yow6lPLSAXx2ifx470yD/nUe22Dv5vBvxK/UK9UUTVs= +github.com/shenghui0779/vitess_pool v1.0.1 h1:I7nxFpzVA1QSuJE9dL4MnKHc3CF5xKK/0MdjHhmImQI= +github.com/shenghui0779/vitess_pool v1.0.1/go.mod h1:vRwWHaeQvz/mrnNetj7v4R5WfAese3ZKZ1gyaFw3UHE= +github.com/shenghui0779/yiigo v1.6.6 h1:nPMzssuRGUi8gBJnzJuTOnMP762EQ68iomo3uo+ikh8= +github.com/shenghui0779/yiigo v1.6.6/go.mod h1:u1DR4AEjJriuXLwt/9BPnoQpwNY1iMdq4rLnz8Au73A= github.com/shiena/ansicolor v0.0.0-20200904210342-c7312218db18 h1:DAYUYH5869yV94zvCES9F51oYtN5oGlwjxJJz7ZCnik= github.com/shiena/ansicolor v0.0.0-20200904210342-c7312218db18/go.mod h1:nkxAfR/5quYxwPZhyDxgasBMnRtBZd0FCEpawpjMUFg= github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= @@ -834,6 +892,7 @@ github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1 github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= @@ -868,11 +927,13 @@ github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common v1.0.194/go.mod github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/kms v1.0.194/go.mod h1:yrBKWhChnDqNz1xuXdSbWXG56XawEq0G5j1lg4VwBD4= github.com/tencentyun/cos-go-sdk-v5 v0.7.35 h1:XVk5GQ4eH1q+DBUJfpaMMdU9TJZWMjwNNwv0PG5nbLQ= github.com/tencentyun/cos-go-sdk-v5 v0.7.35/go.mod h1:4dCEtLHGh8QPxHEkgq+nFaky7yZxQuYwgSJM87icDaw= -github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs= +github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= 
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmthrgd/go-hex v0.0.0-20190904060850-447a3041c3bc h1:9lRDQMhESg+zvGYmW5DyG0UqvY96Bu5QYsTLvCHdrgo= github.com/tmthrgd/go-hex v0.0.0-20190904060850-447a3041c3bc/go.mod h1:bciPuU6GHm1iF1pBvUfxfsH0Wmnc2VbpgvbI9ZWuIRs= +github.com/ugorji/go v1.2.7 h1:qYhyWUUd6WbiM+C6JZAUkIJt/1WrjzNHY9+KCIjVqTo= github.com/ugorji/go v1.2.7/go.mod h1:nF9osbDWLy6bDVv/Rtoh6QgnvNDpmCalQV5urGCCS6M= github.com/ugorji/go/codec v1.2.7 h1:YPXUKf7fYbp/y8xloBqZOw2qaVggbfwMlI8WM3wZUJ0= github.com/ugorji/go/codec v1.2.7/go.mod h1:WGN1fab3R1fzQlVQTkfxVtIBhWDRqOviHU95kRgeqEY= @@ -910,6 +971,9 @@ github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23n github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM= github.com/xdg-go/stringprep v1.0.3 h1:kdwGpVNwPFtjs98xCGkHjQtGKh86rDcRZN17QEMCOIs= github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a h1:fZHgsYlfvtyqToslyjUt3VOPF4J7aK/3MPcK7xp3PDk= @@ -931,8 +995,8 @@ go.etcd.io/etcd/client/v3 v3.5.4 h1:p83BUL3tAYS0OT/r0qglgc3M1JjhM0diV8DSWAhVXv4= go.etcd.io/etcd/client/v3 v3.5.4/go.mod h1:ZaRkVgBZC+L+dLCjTcF1hRXpgZXQPOvnA/Ak/gq3kiY= go.mongodb.org/mongo-driver v1.7.0/go.mod h1:Q4oFMbo1+MSNqICAdYMlC/zSTrwCogR4R8NzkI+yfU8= go.mongodb.org/mongo-driver v1.9.0/go.mod h1:0sQWfOeY63QTntERDJJ/0SuKK0T1uVSgKCuAROlKEPY= -go.mongodb.org/mongo-driver v1.9.1 h1:m078y9v7sBItkt1aaoe2YlvWEXcD263e1a4E1fBrJ1c= -go.mongodb.org/mongo-driver v1.9.1/go.mod h1:0sQWfOeY63QTntERDJJ/0SuKK0T1uVSgKCuAROlKEPY= +go.mongodb.org/mongo-driver v1.10.0 h1:UtV6N5k14upNp4LTduX0QCufG124fSu25Wz9tu94GLg= +go.mongodb.org/mongo-driver v1.10.0/go.mod h1:wsihk0Kdgv8Kqu1Anit4sfK+22vSFbUrAVEYRhCXrA8= go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= @@ -1078,6 +1142,7 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= @@ 
-1096,6 +1161,8 @@ golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 h1:RerP+noqYHUQ8CMRcPlC2nvTa4dcBIjegkuWdcUDuqg= golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20220718184931-c8730f7fcb92 h1:oVlhw3Oe+1reYsE2Nqu19PDJfLzwdU3QUUrG86rLK68= +golang.org/x/oauth2 v0.0.0-20220718184931-c8730f7fcb92/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1183,18 +1250,22 @@ golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210902050250-f475640dd07b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211020174200-9d6173849985/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220405052023-b1e9470b6e64/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220712014510-0a85c31ab51e h1:NHvCuwuS43lGnYhten69ZWqi2QOj/CiDNcKbVqwVoew= -golang.org/x/sys v0.0.0-20220712014510-0a85c31ab51e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8 h1:0A+M6Uqn+Eje4kHMK80dtF3JCXC4ykBgQG4Fe06QRhQ= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.0.0-20220526004731-065cf7ba2467 h1:CBpWXWQpIRjzmkkA+M7q9Fqnwd2mZr3AFqexg8YTfoM= +golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod 
h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1360,8 +1431,11 @@ google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20220713161829-9c7dac0a6568 h1:iKx0VcikTdB4xj9Ho1Opn9AKzWFknYDE7oW/KBWZf9g= -google.golang.org/genproto v0.0.0-20220713161829-9c7dac0a6568/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220715211116-798f69b842b9 h1:1aEQRgZ4Gks2SRAkLzIPpIszRazwVfjSFe1cKc+e0Jg= +google.golang.org/genproto v0.0.0-20220715211116-798f69b842b9/go.mod h1:GkXuJDJ6aQ7lnJcRF+SJVgFdQhypqgl3LB1C9vabdRE= +google.golang.org/genproto v0.0.0-20220718134204-073382fd740c h1:xDUAhRezFnKF6wopxkOfdWYvz2XCiRQzndyDdpwFgbc= +google.golang.org/genproto v0.0.0-20220718134204-073382fd740c/go.mod h1:GkXuJDJ6aQ7lnJcRF+SJVgFdQhypqgl3LB1C9vabdRE= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= @@ -1387,7 +1461,7 @@ google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAG google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.48.0 h1:rQOsyJ/8+ufEDJd/Gdsz7HG220Mh9HAhFHRGnIjda0w= google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= @@ -1444,6 +1518,7 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gorm.io/datatypes v1.0.7 h1:8NhJN4+annFjwV1WufDhFiPjdUvV1lSGUdg1UCjQIWY= @@ -1456,9 +1531,14 @@ gorm.io/driver/postgres v1.3.8 h1:8bEphSAB69t3odsCR4NDzt581iZEWQuRM27Cg6KgfPY= gorm.io/driver/postgres v1.3.8/go.mod h1:qB98Aj6AhRO/oyu/jmZsi/YM9g6UzVCjMxO/6frFvcA= gorm.io/driver/sqlite v1.3.1 h1:bwfE+zTEWklBYoEodIOIBwuWHpnx52Z9zJFW5F33WLk= gorm.io/driver/sqlite v1.3.1/go.mod h1:wJx0hJspfycZ6myN38x1O/AqLtNS6c5o9TndewFbELg= +gorm.io/driver/sqlite v1.3.6 h1:Fi8xNYCUplOqWiPa3/GuCeowRNBRGTf62DEmhMDHeQQ= 
+gorm.io/driver/sqlite v1.3.6/go.mod h1:Sg1/pvnKtbQ7jLXxfZa+jSHvoX8hoZA8cn4xllOMTgE= gorm.io/driver/sqlserver v1.3.1 h1:F5t6ScMzOgy1zukRTIZgLZwKahgt3q1woAILVolKpOI= gorm.io/driver/sqlserver v1.3.1/go.mod h1:w25Vrx2BG+CJNUu/xKbFhaKlGxT/nzRkhWCCoptX8tQ= +gorm.io/driver/sqlserver v1.3.2 h1:yYt8f/xdAKLY7lCCyXxIUEgZ/WsURos3dHrx8MKFGAk= +gorm.io/driver/sqlserver v1.3.2/go.mod h1:w25Vrx2BG+CJNUu/xKbFhaKlGxT/nzRkhWCCoptX8tQ= gorm.io/gorm v1.23.1/go.mod h1:l2lP/RyAtc1ynaTjFksBde/O8v9oOGIApu2/xRitmZk= +gorm.io/gorm v1.23.4/go.mod h1:l2lP/RyAtc1ynaTjFksBde/O8v9oOGIApu2/xRitmZk= gorm.io/gorm v1.23.6/go.mod h1:l2lP/RyAtc1ynaTjFksBde/O8v9oOGIApu2/xRitmZk= gorm.io/gorm v1.23.8 h1:h8sGJ+biDgBA1AD1Ha9gFCx7h8npU7AsLdlkX0n2TpE= gorm.io/gorm v1.23.8/go.mod h1:l2lP/RyAtc1ynaTjFksBde/O8v9oOGIApu2/xRitmZk= @@ -1484,11 +1564,17 @@ k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.60.1 h1:VW25q3bZx9uE3vvdL6M8ezOX79vA2Aq1nEWLqNQclHc= k8s.io/klog/v2 v2.60.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/klog/v2 v2.70.1 h1:7aaoSdahviPmR+XkS7FyxlkkXs6tHISSG03RxleQAVQ= +k8s.io/klog/v2 v2.70.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42 h1:Gii5eqf+GmIEwGNKQYQClCayuJCe2/4fZUvF7VG99sU= k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42/go.mod h1:Z/45zLw8lUo4wdiUkI+v/ImEGAvu3WatcZl3lPMR4Rk= +k8s.io/kube-openapi v0.0.0-20220627174259-011e075b9cb8 h1:yEQKdMCjzAOvGeiTwG4hO/hNVNtDOuUFvMUZ0OlaIzs= +k8s.io/kube-openapi v0.0.0-20220627174259-011e075b9cb8/go.mod h1:mbJ+NSUoAhuR14N0S63bPkh8MGVSo3VYSGZtH/mfMe0= k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 h1:HNSDgDCrr/6Ly3WEGKZftiE7IY19Vz2GdbOCyI4qqhc= k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20220713171938-56c0de1e6f5e h1:W1yba+Bpkwb5BatGKZALQ1yylhwnuD6CkYmrTibyLDM= +k8s.io/utils v0.0.0-20220713171938-56c0de1e6f5e/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= lukechampine.com/uint128 v1.1.1/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= lukechampine.com/uint128 v1.2.0 h1:mBi/5l91vocEN8otkC5bDLhi2KdCticRiwbdB0O+rjI= lukechampine.com/uint128 v1.2.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= @@ -1558,10 +1644,14 @@ modernc.org/ccorpus v1.11.6 h1:J16RXiiqiCgua6+ZvQot4yUuUy8zxgqbqEEUuGPlISk= modernc.org/ccorpus v1.11.6/go.mod h1:2gEUTrWqdpH2pXsmTM1ZkjeSrUWDpjMu2T6m29L/ErQ= modernc.org/db v1.0.3 h1:apxOlWU69je04bY22OT6J0RL23mzvUy22EgTAVyw+Yg= modernc.org/db v1.0.3/go.mod h1:L4ltUg8tu2pkSJk+fKaRrXs/3EdW79ZKYQ5PfVDT53U= +modernc.org/db v1.0.4 h1:EbCNg3ajWVZqQL8oEruFYuscbcnfxzgCIURyq7xFhPc= +modernc.org/db v1.0.4/go.mod h1:h14AWQVee2g0PDe7v6yO7zj6+SpBYRd4+X0oJnaK5LU= modernc.org/file v1.0.3 h1:McYGAMMuqjRp6ptmpcLr3r5yw3gNPsonFCAJ0tNK74U= modernc.org/file v1.0.3/go.mod h1:CNj/pwOfCtCbqiHcXDUlHBB2vWrzdaDCWdcnjtS1+XY= modernc.org/fileutil v1.0.0 h1:Z1AFLZwl6BO8A5NldQg/xTSjGLetp+1Ubvl4alfGx8w= modernc.org/fileutil v1.0.0/go.mod h1:JHsWpkrk/CnVV1H/eGlFf85BEpfkrp56ro8nojIq9Q8= +modernc.org/fileutil v1.1.0 h1:jOk8xhf7A6+Sih7rSQppMgicuBxmZWvfymJqyDAud5s= +modernc.org/fileutil v1.1.0/go.mod h1:SErFOYfL50xApg0X5XfUOQiku7cTgQ6/8XNvJHKy9eI= modernc.org/golex v1.0.1 h1:EYKY1a3wStt0RzHaH8mdSRNg78Ub0OHxYfCRWw35YtM= modernc.org/golex v1.0.1/go.mod 
h1:QCA53QtsT1NdGkaZZkF5ezFwk4IXh4BGNafAARTC254= modernc.org/httpfs v1.0.6 h1:AAgIpFZRXuYnkjftxTAZwMIiwEqAfk8aVB2/oA6nAeM= @@ -1569,6 +1659,9 @@ modernc.org/httpfs v1.0.6/go.mod h1:7dosgurJGp0sPaRanU53W4xZYKh14wfzX420oZADeHM= modernc.org/internal v1.0.0/go.mod h1:VUD/+JAkhCpvkUitlEOnhpVxCgsBI90oTzSCRcqQVSM= modernc.org/internal v1.0.2 h1:Sn3+ojjMRnPaOR6jFISs6KAdRHnR4q9KNuwfKINKmZA= modernc.org/internal v1.0.2/go.mod h1:bycJAcev709ZU/47nil584PeBD+kbu8nv61ozeMso9E= +modernc.org/internal v1.0.3/go.mod h1:dvHFQEGEd33HZar0OdSYIm6yen/77eukCqffWSAwQUc= +modernc.org/internal v1.0.4 h1:U1cbGBExTV43aHmNac3OIzh1knnRBcLjGG+owT2wQ9k= +modernc.org/internal v1.0.4/go.mod h1:dvHFQEGEd33HZar0OdSYIm6yen/77eukCqffWSAwQUc= modernc.org/lex v1.0.0/go.mod h1:G6rxMTy3cH2iA0iXL/HRRv4Znu8MK4higxph/lE7ypk= modernc.org/lexer v1.0.0/go.mod h1:F/Dld0YKYdZCLQ7bD0USbWL4YKCyTDRDHiDTOs0q0vk= modernc.org/libc v0.0.0-20220428101251-2d5f3daf273b/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA= @@ -1614,8 +1707,12 @@ modernc.org/libc v1.16.8/go.mod h1:hYIV5VZczAmGZAnG15Vdngn5HSF5cSkbvfz2B7GRuVU= modernc.org/libc v1.16.10/go.mod h1:hYIV5VZczAmGZAnG15Vdngn5HSF5cSkbvfz2B7GRuVU= modernc.org/libc v1.16.14 h1:MUIjk9Xwlkrp0BqGhMfRkiq0EkZsqfNiP4eixL3YiPk= modernc.org/libc v1.16.14/go.mod h1:hYIV5VZczAmGZAnG15Vdngn5HSF5cSkbvfz2B7GRuVU= +modernc.org/libc v1.16.17 h1:rXo8IZJvP+QSN1KrlV23dtkM3XfGYXjx3RbLLzBtndM= +modernc.org/libc v1.16.17/go.mod h1:hYIV5VZczAmGZAnG15Vdngn5HSF5cSkbvfz2B7GRuVU= modernc.org/lldb v1.0.2 h1:LBw58xVFl01OuM5U9++tLy3wmu+PoWok6T3dHuNjcZk= modernc.org/lldb v1.0.2/go.mod h1:ovbKqyzA9H/iPwHkAOH0qJbIQVT9rlijecenxDwVUi0= +modernc.org/lldb v1.0.4 h1:FWlxVwxUKjuyu1fz3rmXJb4plvsw5F83PYN72ulzFIQ= +modernc.org/lldb v1.0.4/go.mod h1:AKDI6wUJk7iJS8nRX54St8rq9wUIi3o5YGN3rlejR5o= modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k= modernc.org/mathutil v1.1.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= modernc.org/mathutil v1.2.2/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= @@ -1631,6 +1728,8 @@ modernc.org/opt v0.1.3 h1:3XOZf2yznlhC+ibLltsDGzABUGVx8J6pnFMS3E4dcq4= modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= modernc.org/ql v1.4.0 h1:CqLAho+y4N8JwvqT7NJsYsp7YPwiRv6RE2n0n1ksSCU= modernc.org/ql v1.4.0/go.mod h1:q4c29Bgdx+iAtxx47ODW5Xo2X0PDkjSCK9NdQl6KFxc= +modernc.org/ql v1.4.1 h1:p0Bx3+AZ29YvJVNWkpKWa0dYaV+x8a2XABZDkw8GfqE= +modernc.org/ql v1.4.1/go.mod h1:c3RNgNK67+UYCyZXWf0tEYmBtWtGrg9M0IbAB5MAWS4= modernc.org/sortutil v1.1.0 h1:oP3U4uM+NT/qBQcbg/K2iqAX0Nx7B1b6YZtq3Gk/PjM= modernc.org/sortutil v1.1.0/go.mod h1:ZyL98OQHJgH9IEfN71VsamvJgrtRX9Dj2gX+vH86L1k= modernc.org/sqlite v1.14.2/go.mod h1:yqfn85u8wVOE6ub5UT8VI9JjhrwBUUCNyTACN0h6Sx8= @@ -1649,6 +1748,8 @@ modernc.org/z v1.5.1 h1:RTNHdsrOpeoSeOF4FbzTo8gBYByaJ5xT7NgZ9ZqRiJM= modernc.org/z v1.5.1/go.mod h1:eWFB510QWW5Th9YGZT81s+LwvaAs3Q2yr4sP0rmLkv8= modernc.org/zappy v1.0.3 h1:Tr+P3kclDSrvC6zYBW2hWmOmu5SjG6PtvCt3RCjRmss= modernc.org/zappy v1.0.3/go.mod h1:w/Akq8ipfols/xZJdR5IYiQNOqC80qz2mVvsEwEbkiI= +modernc.org/zappy v1.0.5 h1:XEh6U/ITG9I5Fgl9mBczbaOU7khNcS2+jPVaYlalif4= +modernc.org/zappy v1.0.5/go.mod h1:Q5T4ra3/JJNORGK16oe8rRAti7kWtRW4Z93fzin2gBc= muzzammil.xyz/jsonc v1.0.0 h1:B6kaT3wHueZ87mPz3q1nFuM1BlL32IG0wcq0/uOsQ18= muzzammil.xyz/jsonc v1.0.0/go.mod h1:rFv8tUUKe+QLh7v02BhfxXEf4ZHhYD7unR93HL/1Uvo= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= @@ -1656,12 +1757,16 @@ rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= 
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 h1:kDi4JBNAsJWfz1aEXhO8Jg87JJaPNLh5tIzYHgStQ9Y=
sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2/go.mod h1:B+TnT182UBxE84DiCz4CVE26eOSDAeYCpfDnC2kdKMY=
+sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 h1:iXTIw73aPyC+oRdyqqvVJuloN1p0AC/kzH07hu3NE+k=
+sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
sigs.k8s.io/structured-merge-diff/v4 v4.2.1 h1:bKCqE9GvQ5tiVHn5rfn1r+yao3aLQEaLzkkmAkf+A6Y=
sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
+sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo=
+sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=
sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU=
xorm.io/builder v0.3.11-0.20220531020008-1bd24a7dc978/go.mod h1:aUW0S9eb9VCaPohFCH3j7czOx1PMW3i1HrSzbLYGBSE=
xorm.io/builder v0.3.12 h1:ASZYX7fQmy+o8UJdhlLHSW57JDOkM8DNhcAF5d0LiJM=
diff --git a/utils/dorm/yiigo.go b/utils/dorm/yiigo.go
new file mode 100644
index 00000000..84a37a2c
--- /dev/null
+++ b/utils/dorm/yiigo.go
@@ -0,0 +1,21 @@
+package dorm
+
+import (
+	"github.com/jmoiron/sqlx"
+	"github.com/shenghui0779/yiigo"
+	"go.mongodb.org/mongo-driver/mongo"
+)
+
+type ConfigYiiGoClient struct {
+	Dns  string // data source name: a MySQL DSN or a MongoDB connection URI
+	Addr string // Redis address (host:port)
+}
+
+// YiiGoClient wraps the database handles initialized through yiigo.
+// https://github.com/shenghui0779/yiigo
+type YiiGoClient struct {
+	Db        *sqlx.DB         // MySQL handle, set by NewYiiGoMysqlClient
+	MDb       *mongo.Client    // MongoDB handle, set by NewYiiGoMongoDbClient
+	RedisPool *yiigo.RedisConn // Redis connection; left unset by NewYiiGoRedisClient, which only initializes yiigo's pool
+	config    *ConfigYiiGoClient
+}
diff --git a/utils/dorm/yiigo_mongodb.go b/utils/dorm/yiigo_mongodb.go
new file mode 100644
index 00000000..9bef8e29
--- /dev/null
+++ b/utils/dorm/yiigo_mongodb.go
@@ -0,0 +1,18 @@
+package dorm
+
+import (
+	"github.com/shenghui0779/yiigo"
+)
+
+func NewYiiGoMongoDbClient(config *ConfigYiiGoClient) (*YiiGoClient, error) {
+
+	c := &YiiGoClient{config: config}
+
+	yiigo.Init(
+		yiigo.WithMongo(yiigo.Default, c.config.Dns),
+	)
+
+	c.MDb = yiigo.Mongo()
+
+	return c, nil
+}
diff --git a/utils/dorm/yiigo_mysql.go b/utils/dorm/yiigo_mysql.go
new file mode 100644
index 00000000..a6ec085c
--- /dev/null
+++ b/utils/dorm/yiigo_mysql.go
@@ -0,0 +1,27 @@
+package dorm
+
+import (
+	"github.com/shenghui0779/yiigo"
+	"time"
+)
+
+func NewYiiGoMysqlClient(config *ConfigYiiGoClient) (*YiiGoClient, error) {
+
+	c := &YiiGoClient{config: config}
+
+	yiigo.Init(
+		yiigo.WithMySQL(yiigo.Default, &yiigo.DBConfig{
+			DSN: c.config.Dns,
+			Options: &yiigo.DBOptions{
+				MaxOpenConns:    20,
+				MaxIdleConns:    10,
+				ConnMaxLifetime: 10 * time.Minute,
+				ConnMaxIdleTime: 5 * time.Minute,
+			},
+		}),
+	)
+
+	c.Db = yiigo.DB()
+
+	return c, nil
+}
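Usage sketch for the dorm clients above (illustrative only; the DSN, database name, and query are placeholders, and error handling is reduced to panics):

	client, err := dorm.NewYiiGoMysqlClient(&dorm.ConfigYiiGoClient{
		Dns: "user:pass@tcp(127.0.0.1:3306)/demo?parseTime=true", // placeholder DSN
	})
	if err != nil {
		panic(err) // sketch only; handle the error properly in real code
	}
	var total int
	// client.Db is the *sqlx.DB that yiigo initialized above.
	if err := client.Db.Get(&total, "SELECT COUNT(*) FROM users"); err != nil {
		panic(err)
	}

NewYiiGoMongoDbClient and NewYiiGoRedisClient follow the same pattern, reading ConfigYiiGoClient.Dns and ConfigYiiGoClient.Addr respectively.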
diff --git a/utils/dorm/yiigo_redis.go b/utils/dorm/yiigo_redis.go
new file mode 100644
index 00000000..9fa33b53
--- /dev/null
+++ b/utils/dorm/yiigo_redis.go
@@ -0,0 +1,26 @@
+package dorm
+
+import (
+	"github.com/shenghui0779/yiigo"
+	"time"
+)
+
+func NewYiiGoRedisClient(config *ConfigYiiGoClient) (*YiiGoClient, error) {
+
+	c := &YiiGoClient{config: config}
+
+	yiigo.Init(
+		yiigo.WithRedis(yiigo.Default, &yiigo.RedisConfig{
+			Addr: c.config.Addr,
+			Options: &yiigo.RedisOptions{
+				ConnTimeout:  10 * time.Second,
+				ReadTimeout:  10 * time.Second,
+				WriteTimeout: 10 * time.Second,
+				PoolSize:     10,
+				IdleTimeout:  5 * time.Minute,
+			},
+		}),
+	)
+
+	return c, nil // note: c.RedisPool is not populated here; the pool lives inside yiigo
+}
diff --git a/vendor/entgo.io/ent/dialect/dialect.go b/vendor/entgo.io/ent/dialect/dialect.go
new file mode 100644
index 00000000..95f07f95
--- /dev/null
+++ b/vendor/entgo.io/ent/dialect/dialect.go
@@ -0,0 +1,208 @@
+// Copyright 2019-present Facebook Inc. All rights reserved.
+// This source code is licensed under the Apache 2.0 license found
+// in the LICENSE file in the root directory of this source tree.
+
+package dialect
+
+import (
+	"context"
+	"database/sql"
+	"database/sql/driver"
+	"fmt"
+	"log"
+
+	"github.com/google/uuid"
+)
+
+// Dialect names for external usage.
+const (
+	MySQL    = "mysql"
+	SQLite   = "sqlite3"
+	Postgres = "postgres"
+	Gremlin  = "gremlin"
+)
+
+// ExecQuerier wraps the two database operations.
+type ExecQuerier interface {
+	// Exec executes a query that does not return records. For example, in SQL, INSERT or UPDATE.
+	// It scans the result into the pointer v. For SQL drivers, it is dialect/sql.Result.
+	Exec(ctx context.Context, query string, args, v interface{}) error
+	// Query executes a query that returns rows, typically a SELECT in SQL.
+	// It scans the result into the pointer v. For SQL drivers, it is *dialect/sql.Rows.
+	Query(ctx context.Context, query string, args, v interface{}) error
+}
+
+// Driver is the interface that wraps all necessary operations for ent clients.
+type Driver interface {
+	ExecQuerier
+	// Tx starts and returns a new transaction.
+	// The provided context is used until the transaction is committed or rolled back.
+	Tx(context.Context) (Tx, error)
+	// Close closes the underlying connection.
+	Close() error
+	// Dialect returns the dialect name of the driver.
+	Dialect() string
+}
+
+// Tx wraps the Exec and Query operations in a transaction.
+type Tx interface {
+	ExecQuerier
+	driver.Tx
+}
+
+type nopTx struct {
+	Driver
+}
+
+func (nopTx) Commit() error   { return nil }
+func (nopTx) Rollback() error { return nil }
+
+// NopTx returns a Tx with no-op Commit / Rollback methods wrapping
+// the provided Driver d.
+func NopTx(d Driver) Tx {
+	return nopTx{d}
+}
+
+// DebugDriver is a driver that logs all driver operations.
+type DebugDriver struct {
+	Driver                                    // underlying driver.
+	log func(context.Context, ...interface{}) // log function. defaults to log.Println.
+}
+
+// Debug gets a driver and an optional logging function, and returns
+// a new debugged-driver that prints all outgoing operations.
+func Debug(d Driver, logger ...func(...interface{})) Driver {
+	logf := log.Println
+	if len(logger) == 1 {
+		logf = logger[0]
+	}
+	drv := &DebugDriver{d, func(_ context.Context, v ...interface{}) { logf(v...) }}
+	return drv
+}
+
+// DebugWithContext gets a driver and a logging function, and returns
+// a new debugged-driver that prints all outgoing operations with context.
+func DebugWithContext(d Driver, logger func(context.Context, ...interface{})) Driver {
+	drv := &DebugDriver{d, logger}
+	return drv
+}
+
+// Exec logs its params and calls the underlying driver Exec method.
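+// The emitted log line follows the format string below, e.g. (illustrative):
+//
+//	driver.Exec: query=INSERT INTO `users` (`name`) VALUES (?) args=[a8m]
+//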
+func (d *DebugDriver) Exec(ctx context.Context, query string, args, v interface{}) error {
+	d.log(ctx, fmt.Sprintf("driver.Exec: query=%v args=%v", query, args))
+	return d.Driver.Exec(ctx, query, args, v)
+}
+
+// ExecContext logs its params and calls the underlying driver ExecContext method if it is supported.
+func (d *DebugDriver) ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error) {
+	drv, ok := d.Driver.(interface {
+		ExecContext(context.Context, string, ...interface{}) (sql.Result, error)
+	})
+	if !ok {
+		return nil, fmt.Errorf("Driver.ExecContext is not supported")
+	}
+	d.log(ctx, fmt.Sprintf("driver.ExecContext: query=%v args=%v", query, args))
+	return drv.ExecContext(ctx, query, args...)
+}
+
+// Query logs its params and calls the underlying driver Query method.
+func (d *DebugDriver) Query(ctx context.Context, query string, args, v interface{}) error {
+	d.log(ctx, fmt.Sprintf("driver.Query: query=%v args=%v", query, args))
+	return d.Driver.Query(ctx, query, args, v)
+}
+
+// QueryContext logs its params and calls the underlying driver QueryContext method if it is supported.
+func (d *DebugDriver) QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error) {
+	drv, ok := d.Driver.(interface {
+		QueryContext(context.Context, string, ...interface{}) (*sql.Rows, error)
+	})
+	if !ok {
+		return nil, fmt.Errorf("Driver.QueryContext is not supported")
+	}
+	d.log(ctx, fmt.Sprintf("driver.QueryContext: query=%v args=%v", query, args))
+	return drv.QueryContext(ctx, query, args...)
+}
+
+// Tx adds a log-id for the transaction and calls the underlying driver Tx command.
+func (d *DebugDriver) Tx(ctx context.Context) (Tx, error) {
+	tx, err := d.Driver.Tx(ctx)
+	if err != nil {
+		return nil, err
+	}
+	id := uuid.New().String()
+	d.log(ctx, fmt.Sprintf("driver.Tx(%s): started", id))
+	return &DebugTx{tx, id, d.log, ctx}, nil
+}
+
+// BeginTx adds a log-id for the transaction and calls the underlying driver BeginTx command if it is supported.
+func (d *DebugDriver) BeginTx(ctx context.Context, opts *sql.TxOptions) (Tx, error) {
+	drv, ok := d.Driver.(interface {
+		BeginTx(context.Context, *sql.TxOptions) (Tx, error)
+	})
+	if !ok {
+		return nil, fmt.Errorf("Driver.BeginTx is not supported")
+	}
+	tx, err := drv.BeginTx(ctx, opts)
+	if err != nil {
+		return nil, err
+	}
+	id := uuid.New().String()
+	d.log(ctx, fmt.Sprintf("driver.BeginTx(%s): started", id))
+	return &DebugTx{tx, id, d.log, ctx}, nil
+}
+
+// DebugTx is a transaction implementation that logs all transaction operations.
+type DebugTx struct {
+	Tx                                        // underlying transaction.
+	id  string                                // transaction logging id.
+	log func(context.Context, ...interface{}) // log function. defaults to log.Println.
+	ctx context.Context                       // underlying transaction context.
+}
+
+// Exec logs its params and calls the underlying transaction Exec method.
+func (d *DebugTx) Exec(ctx context.Context, query string, args, v interface{}) error {
+	d.log(ctx, fmt.Sprintf("Tx(%s).Exec: query=%v args=%v", d.id, query, args))
+	return d.Tx.Exec(ctx, query, args, v)
+}
+
+// ExecContext logs its params and calls the underlying transaction ExecContext method if it is supported.
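+// If the wrapped Tx does not implement ExecContext, a descriptive error is returned instead.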
+func (d *DebugTx) ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error) {
+	drv, ok := d.Tx.(interface {
+		ExecContext(context.Context, string, ...interface{}) (sql.Result, error)
+	})
+	if !ok {
+		return nil, fmt.Errorf("Tx.ExecContext is not supported")
+	}
+	d.log(ctx, fmt.Sprintf("Tx(%s).ExecContext: query=%v args=%v", d.id, query, args))
+	return drv.ExecContext(ctx, query, args...)
+}
+
+// Query logs its params and calls the underlying transaction Query method.
+func (d *DebugTx) Query(ctx context.Context, query string, args, v interface{}) error {
+	d.log(ctx, fmt.Sprintf("Tx(%s).Query: query=%v args=%v", d.id, query, args))
+	return d.Tx.Query(ctx, query, args, v)
+}
+
+// QueryContext logs its params and calls the underlying transaction QueryContext method if it is supported.
+func (d *DebugTx) QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error) {
+	drv, ok := d.Tx.(interface {
+		QueryContext(context.Context, string, ...interface{}) (*sql.Rows, error)
+	})
+	if !ok {
+		return nil, fmt.Errorf("Tx.QueryContext is not supported")
+	}
+	d.log(ctx, fmt.Sprintf("Tx(%s).QueryContext: query=%v args=%v", d.id, query, args))
+	return drv.QueryContext(ctx, query, args...)
+}
+
+// Commit logs this step and calls the underlying transaction Commit method.
+func (d *DebugTx) Commit() error {
+	d.log(d.ctx, fmt.Sprintf("Tx(%s): committed", d.id))
+	return d.Tx.Commit()
+}
+
+// Rollback logs this step and calls the underlying transaction Rollback method.
+func (d *DebugTx) Rollback() error {
+	d.log(d.ctx, fmt.Sprintf("Tx(%s): rollbacked", d.id))
+	return d.Tx.Rollback()
+}
diff --git a/vendor/entgo.io/ent/dialect/sql/builder.go b/vendor/entgo.io/ent/dialect/sql/builder.go
new file mode 100644
index 00000000..f8252775
--- /dev/null
+++ b/vendor/entgo.io/ent/dialect/sql/builder.go
@@ -0,0 +1,3645 @@
+// Copyright 2019-present Facebook Inc. All rights reserved.
+// This source code is licensed under the Apache 2.0 license found
+// in the LICENSE file in the root directory of this source tree.
+
+// Package sql provides wrappers around the standard database/sql package
+// to allow the generated code to interact with a statically-typed API.
+//
+// Users that are interacting with this package should be aware that the
+// following builders don't check the given SQL syntax nor validate or escape
+// user-inputs. ~All validations are expected to happen in the generated
+// ent package.
+package sql
+
+import (
+	"context"
+	"database/sql/driver"
+	"errors"
+	"fmt"
+	"strconv"
+	"strings"
+
+	"entgo.io/ent/dialect"
+)
+
+// Querier wraps the basic Query method that is implemented
+// by the different builders in this file.
+type Querier interface {
+	// Query returns the query representation of the element
+	// and its arguments (if any).
+	Query() (string, []interface{})
+}
+
+// querierErr allows propagating Querier's inner error.
+type querierErr interface {
+	Err() error
+}
+
+// ColumnBuilder is a builder for column definition in table creation.
+type ColumnBuilder struct {
+	Builder
+	typ    string             // column type.
+	name   string             // column name.
+	attr   string             // extra attributes.
+	modify bool               // modify existing.
+	fk     *ForeignKeyBuilder // foreign-key constraint.
+	check  func(*Builder)     // column checks.
+}
+
+// Column returns a new ColumnBuilder with the given name.
+//
+//	sql.Column("group_id").Type("int").Attr("UNIQUE")
+//
+func Column(name string) *ColumnBuilder { return &ColumnBuilder{name: name} }
+
+// Type sets the column type.
+func (c *ColumnBuilder) Type(t string) *ColumnBuilder {
+	c.typ = t
+	return c
+}
+
+// Attr sets an extra attribute for the column, like UNIQUE or AUTO_INCREMENT.
+func (c *ColumnBuilder) Attr(attr string) *ColumnBuilder {
+	if c.attr != "" && attr != "" {
+		c.attr += " "
+	}
+	c.attr += attr
+	return c
+}
+
+// Constraint adds the CONSTRAINT clause to the ADD COLUMN statement in SQLite.
+func (c *ColumnBuilder) Constraint(fk *ForeignKeyBuilder) *ColumnBuilder {
+	c.fk = fk
+	return c
+}
+
+// Check adds a CHECK clause to the ADD COLUMN statement.
+func (c *ColumnBuilder) Check(check func(*Builder)) *ColumnBuilder {
+	c.check = check
+	return c
+}
+
+// Query returns query representation of a Column.
+func (c *ColumnBuilder) Query() (string, []interface{}) {
+	c.Ident(c.name)
+	if c.typ != "" {
+		if c.postgres() && c.modify {
+			c.WriteString(" TYPE")
+		}
+		c.Pad().WriteString(c.typ)
+	}
+	if c.attr != "" {
+		c.Pad().WriteString(c.attr)
+	}
+	if c.fk != nil {
+		c.WriteString(" CONSTRAINT " + c.fk.symbol)
+		c.Pad().Join(c.fk.ref)
+		for _, action := range c.fk.actions {
+			c.Pad().WriteString(action)
+		}
+	}
+	if c.check != nil {
+		c.WriteString(" CHECK ")
+		c.Nested(c.check)
+	}
+	return c.String(), c.args
+}
+
+// TableBuilder is a query builder for `CREATE TABLE` statement.
+type TableBuilder struct {
+	Builder
+	name        string           // table name.
+	exists      bool             // check existence.
+	charset     string           // table charset.
+	collation   string           // table collation.
+	options     string           // table options.
+	columns     []Querier        // table columns.
+	primary     []string         // primary key.
+	constraints []Querier        // foreign keys and indices.
+	checks      []func(*Builder) // check constraints.
+}
+
+// CreateTable returns a query builder for the `CREATE TABLE` statement.
+//
+//	CreateTable("users").
+//		Columns(
+//			Column("id").Type("int").Attr("auto_increment"),
+//			Column("name").Type("varchar(255)"),
+//		).
+//		PrimaryKey("id")
+//
+func CreateTable(name string) *TableBuilder { return &TableBuilder{name: name} }
+
+// IfNotExists appends the `IF NOT EXISTS` clause to the `CREATE TABLE` statement.
+func (t *TableBuilder) IfNotExists() *TableBuilder {
+	t.exists = true
+	return t
+}
+
+// Column appends the given column to the `CREATE TABLE` statement.
+func (t *TableBuilder) Column(c *ColumnBuilder) *TableBuilder {
+	t.columns = append(t.columns, c)
+	return t
+}
+
+// Columns appends a list of columns to the builder.
+func (t *TableBuilder) Columns(columns ...*ColumnBuilder) *TableBuilder {
+	t.columns = make([]Querier, 0, len(columns))
+	for i := range columns {
+		t.columns = append(t.columns, columns[i])
+	}
+	return t
+}
+
+// PrimaryKey adds a column to the primary-key constraint in the statement.
+func (t *TableBuilder) PrimaryKey(column ...string) *TableBuilder {
+	t.primary = append(t.primary, column...)
+	return t
+}
+
+// ForeignKeys adds a list of foreign-keys to the statement (without constraints).
+func (t *TableBuilder) ForeignKeys(fks ...*ForeignKeyBuilder) *TableBuilder {
+	queries := make([]Querier, len(fks))
+	for i := range fks {
+		// Erase the constraint symbol/name.
+		fks[i].symbol = ""
+		queries[i] = fks[i]
+	}
+	t.constraints = append(t.constraints, queries...)
+	return t
+}
+
+// Constraints adds a list of foreign-key constraints to the statement.
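+// Unlike ForeignKeys, each entry keeps its symbol and is rendered as a named
+// CONSTRAINT, for example (illustrative):
+//
+//	CreateTable("users").
+//		Constraints(
+//			ForeignKey("users_group_fk").Columns("group_id").
+//				Reference(Reference().Table("groups").Columns("id")),
+//		)
+//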
+func (t *TableBuilder) Constraints(fks ...*ForeignKeyBuilder) *TableBuilder {
+	queries := make([]Querier, len(fks))
+	for i := range fks {
+		queries[i] = &Wrapper{"CONSTRAINT %s", fks[i]}
+	}
+	t.constraints = append(t.constraints, queries...)
+	return t
+}
+
+// Checks adds CHECK clauses to the CREATE TABLE statement.
+func (t *TableBuilder) Checks(checks ...func(*Builder)) *TableBuilder {
+	t.checks = append(t.checks, checks...)
+	return t
+}
+
+// Charset appends the `CHARACTER SET` clause to the statement. MySQL only.
+func (t *TableBuilder) Charset(s string) *TableBuilder {
+	t.charset = s
+	return t
+}
+
+// Collate appends the `COLLATE` clause to the statement. MySQL only.
+func (t *TableBuilder) Collate(s string) *TableBuilder {
+	t.collation = s
+	return t
+}
+
+// Options appends additional options to the statement (MySQL only).
+func (t *TableBuilder) Options(s string) *TableBuilder {
+	t.options = s
+	return t
+}
+
+// Query returns query representation of a `CREATE TABLE` statement.
+//
+//	CREATE TABLE [IF NOT EXISTS] name
+//		(table definition)
+//		[charset and collation]
+//
+func (t *TableBuilder) Query() (string, []interface{}) {
+	t.WriteString("CREATE TABLE ")
+	if t.exists {
+		t.WriteString("IF NOT EXISTS ")
+	}
+	t.Ident(t.name)
+	t.Nested(func(b *Builder) {
+		b.JoinComma(t.columns...)
+		if len(t.primary) > 0 {
+			b.Comma().WriteString("PRIMARY KEY")
+			b.Nested(func(b *Builder) {
+				b.IdentComma(t.primary...)
+			})
+		}
+		if len(t.constraints) > 0 {
+			b.Comma().JoinComma(t.constraints...)
+		}
+		for _, check := range t.checks {
+			check(b.Comma())
+		}
+	})
+	if t.charset != "" {
+		t.WriteString(" CHARACTER SET " + t.charset)
+	}
+	if t.collation != "" {
+		t.WriteString(" COLLATE " + t.collation)
+	}
+	if t.options != "" {
+		t.WriteString(" " + t.options)
+	}
+	return t.String(), t.args
+}
+
+// DescribeBuilder is a query builder for `DESCRIBE` statement.
+type DescribeBuilder struct {
+	Builder
+	name string // table name.
+}
+
+// Describe returns a query builder for the `DESCRIBE` statement.
+//
+//	Describe("users")
+//
+func Describe(name string) *DescribeBuilder { return &DescribeBuilder{name: name} }
+
+// Query returns query representation of a `DESCRIBE` statement.
+func (t *DescribeBuilder) Query() (string, []interface{}) {
+	t.WriteString("DESCRIBE ")
+	t.Ident(t.name)
+	return t.String(), nil
+}
+
+// TableAlter is a query builder for `ALTER TABLE` statement.
+type TableAlter struct {
+	Builder
+	name    string    // table to alter.
+	Queries []Querier // columns and foreign-keys to add.
+}
+
+// AlterTable returns a query builder for the `ALTER TABLE` statement.
+//
+//	AlterTable("users").
+//		AddColumn(Column("group_id").Type("int").Attr("UNIQUE")).
+//		AddForeignKey(ForeignKey().Columns("group_id").
+//			Reference(Reference().Table("groups").Columns("id")).
+//			OnDelete("CASCADE"),
+//		)
+//
+func AlterTable(name string) *TableAlter { return &TableAlter{name: name} }
+
+// AddColumn appends the `ADD COLUMN` clause to the given `ALTER TABLE` statement.
+func (t *TableAlter) AddColumn(c *ColumnBuilder) *TableAlter {
+	t.Queries = append(t.Queries, &Wrapper{"ADD COLUMN %s", c})
+	return t
+}
+
+// ModifyColumn appends the `MODIFY/ALTER COLUMN` clause to the given `ALTER TABLE` statement.
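+// On Postgres this renders as `ALTER COLUMN ... TYPE ...`; on other dialects
+// (e.g. MySQL) it renders as `MODIFY COLUMN ...`.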
+func (t *TableAlter) ModifyColumn(c *ColumnBuilder) *TableAlter {
+	switch {
+	case t.postgres():
+		c.modify = true
+		t.Queries = append(t.Queries, &Wrapper{"ALTER COLUMN %s", c})
+	default:
+		t.Queries = append(t.Queries, &Wrapper{"MODIFY COLUMN %s", c})
+	}
+	return t
+}
+
+// RenameColumn appends the `RENAME COLUMN` clause to the given `ALTER TABLE` statement.
+func (t *TableAlter) RenameColumn(old, new string) *TableAlter {
+	t.Queries = append(t.Queries, Raw(fmt.Sprintf("RENAME COLUMN %s TO %s", t.Quote(old), t.Quote(new))))
+	return t
+}
+
+// ModifyColumns calls ModifyColumn with each of the given builders.
+func (t *TableAlter) ModifyColumns(cs ...*ColumnBuilder) *TableAlter {
+	for _, c := range cs {
+		t.ModifyColumn(c)
+	}
+	return t
+}
+
+// DropColumn appends the `DROP COLUMN` clause to the given `ALTER TABLE` statement.
+func (t *TableAlter) DropColumn(c *ColumnBuilder) *TableAlter {
+	t.Queries = append(t.Queries, &Wrapper{"DROP COLUMN %s", c})
+	return t
+}
+
+// ChangeColumn appends the `CHANGE COLUMN` clause to the given `ALTER TABLE` statement.
+func (t *TableAlter) ChangeColumn(name string, c *ColumnBuilder) *TableAlter {
+	prefix := fmt.Sprintf("CHANGE COLUMN %s", t.Quote(name))
+	t.Queries = append(t.Queries, &Wrapper{prefix + " %s", c})
+	return t
+}
+
+// RenameIndex appends the `RENAME INDEX` clause to the given `ALTER TABLE` statement.
+func (t *TableAlter) RenameIndex(curr, new string) *TableAlter {
+	t.Queries = append(t.Queries, Raw(fmt.Sprintf("RENAME INDEX %s TO %s", t.Quote(curr), t.Quote(new))))
+	return t
+}
+
+// DropIndex appends the `DROP INDEX` clause to the given `ALTER TABLE` statement.
+func (t *TableAlter) DropIndex(name string) *TableAlter {
+	t.Queries = append(t.Queries, Raw(fmt.Sprintf("DROP INDEX %s", t.Quote(name))))
+	return t
+}
+
+// AddIndex appends the `ADD INDEX` clause to the given `ALTER TABLE` statement.
+func (t *TableAlter) AddIndex(idx *IndexBuilder) *TableAlter {
+	b := &Builder{dialect: t.dialect}
+	b.WriteString("ADD ")
+	if idx.unique {
+		b.WriteString("UNIQUE ")
+	}
+	b.WriteString("INDEX ")
+	b.Ident(idx.name)
+	b.Nested(func(b *Builder) {
+		b.IdentComma(idx.columns...)
+	})
+	t.Queries = append(t.Queries, b)
+	return t
+}
+
+// AddForeignKey adds a foreign key constraint to the `ALTER TABLE` statement.
+func (t *TableAlter) AddForeignKey(fk *ForeignKeyBuilder) *TableAlter {
+	t.Queries = append(t.Queries, &Wrapper{"ADD CONSTRAINT %s", fk})
+	return t
+}
+
+// DropConstraint appends the `DROP CONSTRAINT` clause to the given `ALTER TABLE` statement.
+func (t *TableAlter) DropConstraint(ident string) *TableAlter {
+	t.Queries = append(t.Queries, Raw(fmt.Sprintf("DROP CONSTRAINT %s", t.Quote(ident))))
+	return t
+}
+
+// DropForeignKey appends the `DROP FOREIGN KEY` clause to the given `ALTER TABLE` statement.
+func (t *TableAlter) DropForeignKey(ident string) *TableAlter {
+	t.Queries = append(t.Queries, Raw(fmt.Sprintf("DROP FOREIGN KEY %s", t.Quote(ident))))
+	return t
+}
+
+// Query returns query representation of the `ALTER TABLE` statement.
+//
+//	ALTER TABLE name
+//		[alter_specification]
+//
+func (t *TableAlter) Query() (string, []interface{}) {
+	t.WriteString("ALTER TABLE ")
+	t.Ident(t.name)
+	t.Pad()
+	t.JoinComma(t.Queries...)
+	return t.String(), t.args
+}
+
+// IndexAlter is a query builder for `ALTER INDEX` statement.
+type IndexAlter struct {
+	Builder
+	name    string    // index to alter.
+	Queries []Querier // alter options.
+}
+
+// AlterIndex returns a query builder for the `ALTER INDEX` statement.
+//
+//	AlterIndex("old_key").
+//		Rename("new_key")
+//
+func AlterIndex(name string) *IndexAlter { return &IndexAlter{name: name} }
+
+// Rename appends the `RENAME TO` clause to the `ALTER INDEX` statement.
+func (i *IndexAlter) Rename(name string) *IndexAlter {
+	i.Queries = append(i.Queries, Raw(fmt.Sprintf("RENAME TO %s", i.Quote(name))))
+	return i
+}
+
+// Query returns query representation of the `ALTER INDEX` statement.
+//
+//	ALTER INDEX name
+//		[alter_specification]
+//
+func (i *IndexAlter) Query() (string, []interface{}) {
+	i.WriteString("ALTER INDEX ")
+	i.Ident(i.name)
+	i.Pad()
+	i.JoinComma(i.Queries...)
+	return i.String(), i.args
+}
+
+// ForeignKeyBuilder is the builder for the foreign-key constraint clause.
+type ForeignKeyBuilder struct {
+	Builder
+	symbol  string
+	columns []string
+	actions []string
+	ref     *ReferenceBuilder
+}
+
+// ForeignKey returns a builder for the foreign-key constraint clause in create/alter table statements.
+//
+//	ForeignKey().
+//		Columns("group_id").
+//		Reference(Reference().Table("groups").Columns("id")).
+//		OnDelete("CASCADE")
+//
+func ForeignKey(symbol ...string) *ForeignKeyBuilder {
+	fk := &ForeignKeyBuilder{}
+	if len(symbol) != 0 {
+		fk.symbol = symbol[0]
+	}
+	return fk
+}
+
+// Symbol sets the symbol of the foreign key.
+func (fk *ForeignKeyBuilder) Symbol(s string) *ForeignKeyBuilder {
+	fk.symbol = s
+	return fk
+}
+
+// Columns sets the columns of the foreign key in the source table.
+func (fk *ForeignKeyBuilder) Columns(s ...string) *ForeignKeyBuilder {
+	fk.columns = append(fk.columns, s...)
+	return fk
+}
+
+// Reference sets the reference clause.
+func (fk *ForeignKeyBuilder) Reference(r *ReferenceBuilder) *ForeignKeyBuilder {
+	fk.ref = r
+	return fk
+}
+
+// OnDelete sets the on delete action for this constraint.
+func (fk *ForeignKeyBuilder) OnDelete(action string) *ForeignKeyBuilder {
+	fk.actions = append(fk.actions, "ON DELETE "+action)
+	return fk
+}
+
+// OnUpdate sets the on update action for this constraint.
+func (fk *ForeignKeyBuilder) OnUpdate(action string) *ForeignKeyBuilder {
+	fk.actions = append(fk.actions, "ON UPDATE "+action)
+	return fk
+}
+
+// Query returns query representation of a foreign key constraint.
+func (fk *ForeignKeyBuilder) Query() (string, []interface{}) {
+	if fk.symbol != "" {
+		fk.Ident(fk.symbol).Pad()
+	}
+	fk.WriteString("FOREIGN KEY")
+	fk.Nested(func(b *Builder) {
+		b.IdentComma(fk.columns...)
+	})
+	fk.Pad().Join(fk.ref)
+	for _, action := range fk.actions {
+		fk.Pad().WriteString(action)
+	}
+	return fk.String(), fk.args
+}
+
+// ReferenceBuilder is a builder for the reference clause in constraints. For example, in foreign key creation.
+type ReferenceBuilder struct {
+	Builder
+	table   string   // referenced table.
+	columns []string // referenced columns.
+}
+
+// Reference creates a reference builder for the reference_option clause.
+//
+//	Reference().Table("groups").Columns("id")
+//
+func Reference() *ReferenceBuilder { return &ReferenceBuilder{} }
+
+// Table sets the referenced table.
+func (r *ReferenceBuilder) Table(s string) *ReferenceBuilder {
+	r.table = s
+	return r
+}
+
+// Columns sets the columns of the referenced table.
+func (r *ReferenceBuilder) Columns(s ...string) *ReferenceBuilder {
+	r.columns = append(r.columns, s...)
+	return r
+}
+
+// Query returns query representation of a reference clause.
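+// For example (sketch): Reference().Table("groups").Columns("id") renders
+// roughly as REFERENCES "groups"("id") (identifier quoting depends on the dialect).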
+func (r *ReferenceBuilder) Query() (string, []interface{}) {
+	r.WriteString("REFERENCES ")
+	r.Ident(r.table)
+	r.Nested(func(b *Builder) {
+		b.IdentComma(r.columns...)
+	})
+	return r.String(), r.args
+}
+
+// IndexBuilder is a builder for `CREATE INDEX` statement.
+type IndexBuilder struct {
+	Builder
+	name    string
+	unique  bool
+	exists  bool
+	table   string
+	method  string
+	columns []string
+}
+
+// CreateIndex creates a builder for the `CREATE INDEX` statement.
+//
+//	CreateIndex("index_name").
+//		Unique().
+//		Table("users").
+//		Column("name")
+//
+// Or:
+//
+//	CreateIndex("index_name").
+//		Unique().
+//		Table("users").
+//		Columns("name", "age")
+//
+func CreateIndex(name string) *IndexBuilder {
+	return &IndexBuilder{name: name}
+}
+
+// IfNotExists appends the `IF NOT EXISTS` clause to the `CREATE INDEX` statement.
+func (i *IndexBuilder) IfNotExists() *IndexBuilder {
+	i.exists = true
+	return i
+}
+
+// Unique sets the index to be a unique index.
+func (i *IndexBuilder) Unique() *IndexBuilder {
+	i.unique = true
+	return i
+}
+
+// Table defines the table for the index.
+func (i *IndexBuilder) Table(table string) *IndexBuilder {
+	i.table = table
+	return i
+}
+
+// Using sets the method to create the index with.
+func (i *IndexBuilder) Using(method string) *IndexBuilder {
+	i.method = method
+	return i
+}
+
+// Column appends a column to the column list for the index.
+func (i *IndexBuilder) Column(column string) *IndexBuilder {
+	i.columns = append(i.columns, column)
+	return i
+}
+
+// Columns appends the given columns to the column list for the index.
+func (i *IndexBuilder) Columns(columns ...string) *IndexBuilder {
+	i.columns = append(i.columns, columns...)
+	return i
+}
+
+// Query returns query representation of a `CREATE INDEX` statement.
+func (i *IndexBuilder) Query() (string, []interface{}) {
+	i.WriteString("CREATE ")
+	if i.unique {
+		i.WriteString("UNIQUE ")
+	}
+	i.WriteString("INDEX ")
+	if i.exists {
+		i.WriteString("IF NOT EXISTS ")
+	}
+	i.Ident(i.name)
+	i.WriteString(" ON ")
+	i.Ident(i.table)
+	switch i.dialect {
+	case dialect.Postgres:
+		if i.method != "" {
+			i.WriteString(" USING ").Ident(i.method)
+		}
+		i.Nested(func(b *Builder) {
+			b.IdentComma(i.columns...)
+		})
+	case dialect.MySQL:
+		i.Nested(func(b *Builder) {
+			b.IdentComma(i.columns...)
+		})
+		if i.method != "" {
+			i.WriteString(" USING " + i.method)
+		}
+	default:
+		i.Nested(func(b *Builder) {
+			b.IdentComma(i.columns...)
+		})
+	}
+	return i.String(), nil
+}
+
+// DropIndexBuilder is a builder for `DROP INDEX` statement.
+type DropIndexBuilder struct {
+	Builder
+	name  string
+	table string
+}
+
+// DropIndex creates a builder for the `DROP INDEX` statement.
+//
+// MySQL:
+//
+//	DropIndex("index_name").
+//		Table("users")
+//
+// SQLite/PostgreSQL:
+//
+//	DropIndex("index_name")
+//
+func DropIndex(name string) *DropIndexBuilder {
+	return &DropIndexBuilder{name: name}
+}
+
+// Table defines the table for the index.
+func (d *DropIndexBuilder) Table(table string) *DropIndexBuilder {
+	d.table = table
+	return d
+}
+
+// Query returns query representation of a `DROP INDEX` statement.
+//
+//	DROP INDEX index_name [ON table_name]
+//
+func (d *DropIndexBuilder) Query() (string, []interface{}) {
+	d.WriteString("DROP INDEX ")
+	d.Ident(d.name)
+	if d.table != "" {
+		d.WriteString(" ON ")
+		d.Ident(d.table)
+	}
+	return d.String(), nil
+}
+
+// InsertBuilder is a builder for `INSERT INTO` statement.
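+// For example (sketch): Insert("users").Columns("name").Values("a8m") renders
+// roughly as INSERT INTO `users` (`name`) VALUES (?) with "a8m" as its argument.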
+type InsertBuilder struct {
+	Builder
+	table     string
+	schema    string
+	columns   []string
+	defaults  bool
+	returning []string
+	values    [][]interface{}
+	conflict  *conflict
+}
+
+// Insert creates a builder for the `INSERT INTO` statement.
+//
+//	Insert("users").
+//		Columns("name", "age").
+//		Values("a8m", 10).
+//		Values("foo", 20)
+//
+// Note: Insert inserts all values in one batch.
+func Insert(table string) *InsertBuilder { return &InsertBuilder{table: table} }
+
+// Schema sets the database name for the insert table.
+func (i *InsertBuilder) Schema(name string) *InsertBuilder {
+	i.schema = name
+	return i
+}
+
+// Set is a syntactic sugar API for inserting only one row.
+func (i *InsertBuilder) Set(column string, v interface{}) *InsertBuilder {
+	i.columns = append(i.columns, column)
+	if len(i.values) == 0 {
+		i.values = append(i.values, []interface{}{v})
+	} else {
+		i.values[0] = append(i.values[0], v)
+	}
+	return i
+}
+
+// Columns appends columns to the INSERT statement.
+func (i *InsertBuilder) Columns(columns ...string) *InsertBuilder {
+	i.columns = append(i.columns, columns...)
+	return i
+}
+
+// Values appends a value tuple for the insert statement.
+func (i *InsertBuilder) Values(values ...interface{}) *InsertBuilder {
+	i.values = append(i.values, values)
+	return i
+}
+
+// Default sets the default values clause based on the dialect type.
+func (i *InsertBuilder) Default() *InsertBuilder {
+	i.defaults = true
+	return i
+}
+
+// Returning adds the `RETURNING` clause to the insert statement. PostgreSQL only.
+func (i *InsertBuilder) Returning(columns ...string) *InsertBuilder {
+	i.returning = columns
+	return i
+}
+
+type (
+	// conflict holds the configuration for the
+	// `ON CONFLICT` / `ON DUPLICATE KEY` clause.
+	conflict struct {
+		target struct {
+			constraint string
+			columns    []string
+			where      *Predicate
+		}
+		action struct {
+			nothing bool
+			where   *Predicate
+			update  []func(*UpdateSet)
+		}
+	}
+
+	// ConflictOption allows configuring the
+	// conflict config using functional options.
+	ConflictOption func(*conflict)
+)
+
+// ConflictColumns sets the unique constraints that trigger the conflict
+// resolution on insert to perform an upsert operation. The columns must
+// have a unique constraint applied to trigger this behaviour.
+//
+//	sql.Insert("users").
+//		Columns("id", "name").
+//		Values(1, "Mashraki").
+//		OnConflict(
+//			sql.ConflictColumns("id"),
+//			sql.ResolveWithNewValues(),
+//		)
+//
+func ConflictColumns(names ...string) ConflictOption {
+	return func(c *conflict) {
+		c.target.columns = names
+	}
+}
+
+// ConflictConstraint allows setting the constraint
+// name (i.e. `ON CONSTRAINT `) for PostgreSQL.
+//
+//	sql.Insert("users").
+//		Columns("id", "name").
+//		Values(1, "Mashraki").
+//		OnConflict(
+//			sql.ConflictConstraint("users_pkey"),
+//			sql.ResolveWithNewValues(),
+//		)
+//
+func ConflictConstraint(name string) ConflictOption {
+	return func(c *conflict) {
+		c.target.constraint = name
+	}
+}
+
+// ConflictWhere allows inference of partial unique indexes. See the PostgreSQL
+// doc: https://www.postgresql.org/docs/current/sql-insert.html#SQL-ON-CONFLICT
+func ConflictWhere(p *Predicate) ConflictOption {
+	return func(c *conflict) {
+		c.target.where = p
+	}
+}
+
+// UpdateWhere allows setting an update condition. Only rows
+// for which this expression returns true will be updated.
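+// For example, a sketch (the predicate is illustrative):
+//
+//	sql.Insert("users").
+//		Columns("id", "name").
+//		Values(1, "Mashraki").
+//		OnConflict(
+//			sql.ConflictColumns("id"),
+//			sql.ResolveWithNewValues(),
+//			sql.UpdateWhere(sql.NEQ("name", "Mashraki")),
+//		)
+//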
+func UpdateWhere(p *Predicate) ConflictOption {
+	return func(c *conflict) {
+		c.action.where = p
+	}
+}
+
+// DoNothing configures the conflict_action to `DO NOTHING`.
+// Supported by SQLite and PostgreSQL.
+//
+//	sql.Insert("users").
+//		Columns("id", "name").
+//		Values(1, "Mashraki").
+//		OnConflict(
+//			sql.ConflictColumns("id"),
+//			sql.DoNothing(),
+//		)
+//
+func DoNothing() ConflictOption {
+	return func(c *conflict) {
+		c.action.nothing = true
+	}
+}
+
+// ResolveWithIgnore sets each column to itself to force an update and return the ID,
+// otherwise does not change any data. This may still trigger update hooks in the database.
+//
+//	sql.Insert("users").
+//		Columns("id").
+//		Values(1).
+//		OnConflict(
+//			sql.ConflictColumns("id"),
+//			sql.ResolveWithIgnore(),
+//		)
+//
+//	// Output:
+//	// MySQL: INSERT INTO `users` (`id`) VALUES(1) ON DUPLICATE KEY UPDATE `id` = `users`.`id`
+//	// PostgreSQL: INSERT INTO "users" ("id") VALUES(1) ON CONFLICT ("id") DO UPDATE SET "id" = "users"."id"
+//
+func ResolveWithIgnore() ConflictOption {
+	return func(c *conflict) {
+		c.action.update = append(c.action.update, func(u *UpdateSet) {
+			for _, c := range u.columns {
+				u.SetIgnore(c)
+			}
+		})
+	}
+}
+
+// ResolveWithNewValues updates columns using the new values proposed
+// for insertion using the special EXCLUDED/VALUES table.
+//
+//	sql.Insert("users").
+//		Columns("id", "name").
+//		Values(1, "Mashraki").
+//		OnConflict(
+//			sql.ConflictColumns("id"),
+//			sql.ResolveWithNewValues(),
+//		)
+//
+//	// Output:
+//	// MySQL: INSERT INTO `users` (`id`, `name`) VALUES(1, 'Mashraki') ON DUPLICATE KEY UPDATE `id` = VALUES(`id`), `name` = VALUES(`name`)
+//	// PostgreSQL: INSERT INTO "users" ("id", "name") VALUES(1, 'Mashraki') ON CONFLICT ("id") DO UPDATE SET "id" = "excluded"."id", "name" = "excluded"."name"
+//
+func ResolveWithNewValues() ConflictOption {
+	return func(c *conflict) {
+		c.action.update = append(c.action.update, func(u *UpdateSet) {
+			for _, c := range u.columns {
+				u.SetExcluded(c)
+			}
+		})
+	}
+}
+
+// ResolveWith allows setting a custom function to set the `UPDATE` clause.
+//
+//	Insert("users").
+//		Columns("id", "name").
+//		Values(1, "Mashraki").
+//		OnConflict(
+//			ConflictColumns("name"),
+//			ResolveWith(func(u *UpdateSet) {
+//				u.SetIgnore("id")
+//				u.SetNull("created_at")
+//				u.Set("name", Expr(u.Excluded().C("name")))
+//			}),
+//		)
+//
+func ResolveWith(fn func(*UpdateSet)) ConflictOption {
+	return func(c *conflict) {
+		c.action.update = append(c.action.update, fn)
+	}
+}
+
+// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause
+// of the `INSERT` statement. For example:
+//
+//	sql.Insert("users").
+//		Columns("id", "name").
+//		Values(1, "Mashraki").
+//		OnConflict(
+//			sql.ConflictColumns("id"),
+//			sql.ResolveWithNewValues(),
+//		)
+//
+func (i *InsertBuilder) OnConflict(opts ...ConflictOption) *InsertBuilder {
+	if i.conflict == nil {
+		i.conflict = &conflict{}
+	}
+	for _, opt := range opts {
+		opt(i.conflict)
+	}
+	return i
+}
+
+// UpdateSet describes a set of changes of the `DO UPDATE` clause.
+type UpdateSet struct {
+	columns []string
+	update  *UpdateBuilder
+}
+
+// Table returns the table the `UPSERT` statement is executed on.
+func (u *UpdateSet) Table() *SelectTable {
+	return Dialect(u.update.dialect).Table(u.update.table)
+}
+
+// Columns returns all columns in the `INSERT` statement.
+func (u *UpdateSet) Columns() []string {
+	return u.columns
+}
+
+// UpdateColumns returns all columns in the `UPDATE` statement.
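+// Columns set to NULL come first, followed by the regular assignment columns.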
+func (u *UpdateSet) UpdateColumns() []string { + return append(u.update.nulls, u.update.columns...) +} + +// Set sets a column to a given value. +func (u *UpdateSet) Set(column string, v interface{}) *UpdateSet { + u.update.Set(column, v) + return u +} + +// Add adds a numeric value to the given column. +func (u *UpdateSet) Add(column string, v interface{}) *UpdateSet { + u.update.Add(column, v) + return u +} + +// SetNull sets a column as null value. +func (u *UpdateSet) SetNull(column string) *UpdateSet { + u.update.SetNull(column) + return u +} + +// SetIgnore sets the column to itself. For example, "id" = "users"."id". +func (u *UpdateSet) SetIgnore(name string) *UpdateSet { + return u.Set(name, Expr(u.Table().C(name))) +} + +// SetExcluded sets the column name to its EXCLUDED/VALUES value. +// For example, "c" = "excluded"."c", or `c` = VALUES(`c`). +func (u *UpdateSet) SetExcluded(name string) *UpdateSet { + switch u.update.Dialect() { + case dialect.MySQL: + u.update.Set(name, ExprFunc(func(b *Builder) { + b.WriteString("VALUES(").Ident(name).WriteByte(')') + })) + default: + t := Dialect(u.update.dialect).Table("excluded") + u.update.Set(name, Expr(t.C(name))) + } + return u +} + +// Query returns query representation of an `INSERT INTO` statement. +func (i *InsertBuilder) Query() (string, []interface{}) { + i.WriteString("INSERT INTO ") + i.writeSchema(i.schema) + i.Ident(i.table).Pad() + if i.defaults && len(i.columns) == 0 { + i.writeDefault() + } else { + i.WriteByte('(').IdentComma(i.columns...).WriteByte(')') + i.WriteString(" VALUES ") + for j, v := range i.values { + if j > 0 { + i.Comma() + } + i.WriteByte('(').Args(v...).WriteByte(')') + } + } + if i.conflict != nil { + i.writeConflict() + } + if len(i.returning) > 0 && !i.mysql() { + i.WriteString(" RETURNING ") + i.IdentComma(i.returning...) + } + return i.String(), i.args +} + +func (i *InsertBuilder) writeDefault() { + switch i.Dialect() { + case dialect.MySQL: + i.WriteString("VALUES ()") + case dialect.SQLite, dialect.Postgres: + i.WriteString("DEFAULT VALUES") + } +} + +func (i *InsertBuilder) writeConflict() { + switch i.Dialect() { + case dialect.MySQL: + i.WriteString(" ON DUPLICATE KEY UPDATE ") + if i.conflict.action.nothing { + i.AddError(fmt.Errorf("invalid CONFLICT action ('DO NOTHING')")) + } + case dialect.SQLite, dialect.Postgres: + i.WriteString(" ON CONFLICT") + switch t := i.conflict.target; { + case t.constraint != "" && len(t.columns) != 0: + i.AddError(fmt.Errorf("duplicate CONFLICT clauses: %q, %q", t.constraint, t.columns)) + case t.constraint != "": + i.WriteString(" ON CONSTRAINT ").Ident(t.constraint) + case len(t.columns) != 0: + i.WriteString(" (").IdentComma(t.columns...).WriteByte(')') + } + if p := i.conflict.target.where; p != nil { + i.WriteString(" WHERE ").Join(p) + } + if i.conflict.action.nothing { + i.WriteString(" DO NOTHING") + return + } + i.WriteString(" DO UPDATE SET ") + } + if len(i.conflict.action.update) == 0 { + i.AddError(errors.New("missing action for 'DO UPDATE SET' clause")) + } + u := &UpdateSet{columns: i.columns, update: Dialect(i.dialect).Update(i.table)} + u.update.Builder = i.Builder + for _, f := range i.conflict.action.update { + f(u) + } + u.update.writeSetter(&i.Builder) + if p := i.conflict.action.where; p != nil { + p.qualifier = i.table + i.WriteString(" WHERE ").Join(p) + } +} + +// UpdateBuilder is a builder for `UPDATE` statement. 
+type UpdateBuilder struct { + Builder + table string + schema string + where *Predicate + nulls []string + columns []string + values []interface{} +} + +// Update creates a builder for the `UPDATE` statement. +// +// Update("users").Set("name", "foo").Set("age", 10) +// +func Update(table string) *UpdateBuilder { return &UpdateBuilder{table: table} } + +// Schema sets the database name for the updated table. +func (u *UpdateBuilder) Schema(name string) *UpdateBuilder { + u.schema = name + return u +} + +// Set sets a column to a given value. If `Set` was called before with +// the same column name, it overrides the value of the previous call. +func (u *UpdateBuilder) Set(column string, v interface{}) *UpdateBuilder { + for i := range u.columns { + if column == u.columns[i] { + u.values[i] = v + return u + } + } + u.columns = append(u.columns, column) + u.values = append(u.values, v) + return u +} + +// Add adds a numeric value to the given column. Note that, calling Set(c) +// after Add(c) will erase previous calls with c from the builder. +func (u *UpdateBuilder) Add(column string, v interface{}) *UpdateBuilder { + u.columns = append(u.columns, column) + u.values = append(u.values, ExprFunc(func(b *Builder) { + b.WriteString("COALESCE") + b.Nested(func(b *Builder) { + b.Ident(Table(u.table).C(column)).Comma().WriteByte('0') + }) + b.WriteString(" + ") + b.Arg(v) + })) + return u +} + +// SetNull sets a column as null value. +func (u *UpdateBuilder) SetNull(column string) *UpdateBuilder { + u.nulls = append(u.nulls, column) + return u +} + +// Where adds a where predicate for update statement. +func (u *UpdateBuilder) Where(p *Predicate) *UpdateBuilder { + if u.where != nil { + u.where = And(u.where, p) + } else { + u.where = p + } + return u +} + +// FromSelect makes it possible to update entities that match the sub-query. +func (u *UpdateBuilder) FromSelect(s *Selector) *UpdateBuilder { + u.Where(s.where) + if table, _ := s.from.(*SelectTable); table != nil { + u.table = table.name + } + return u +} + +// Empty reports whether this builder does not contain update changes. +func (u *UpdateBuilder) Empty() bool { + return len(u.columns) == 0 && len(u.nulls) == 0 +} + +// Query returns query representation of an `UPDATE` statement. +func (u *UpdateBuilder) Query() (string, []interface{}) { + b := u.Builder.clone() + b.WriteString("UPDATE ") + b.writeSchema(u.schema) + b.Ident(u.table).WriteString(" SET ") + u.writeSetter(&b) + if u.where != nil { + b.WriteString(" WHERE ") + b.Join(u.where) + } + return b.String(), b.args +} + +// writeSetter writes the "SET" clause for the UPDATE statement. +func (u *UpdateBuilder) writeSetter(b *Builder) { + for i, c := range u.nulls { + if i > 0 { + b.Comma() + } + b.Ident(c).WriteString(" = NULL") + } + if len(u.nulls) > 0 && len(u.columns) > 0 { + b.Comma() + } + for i, c := range u.columns { + if i > 0 { + b.Comma() + } + b.Ident(c).WriteString(" = ") + switch v := u.values[i].(type) { + case Querier: + b.Join(v) + default: + b.Arg(v) + } + } +} + +// DeleteBuilder is a builder for `DELETE` statement. +type DeleteBuilder struct { + Builder + table string + schema string + where *Predicate +} + +// Delete creates a builder for the `DELETE` statement. +// +// Delete("users"). 
+//	Where(
+//		Or(
+//			EQ("name", "foo").And().EQ("age", 10),
+//			EQ("name", "bar").And().EQ("age", 20),
+//			And(
+//				EQ("name", "qux"),
+//				EQ("age", 1).Or().EQ("age", 2),
+//			),
+//		),
+//	)
+//
+func Delete(table string) *DeleteBuilder { return &DeleteBuilder{table: table} }
+
+// Schema sets the database name for the table whose row will be deleted.
+func (d *DeleteBuilder) Schema(name string) *DeleteBuilder {
+	d.schema = name
+	return d
+}
+
+// Where appends a where predicate to the `DELETE` statement.
+func (d *DeleteBuilder) Where(p *Predicate) *DeleteBuilder {
+	if d.where != nil {
+		d.where = And(d.where, p)
+	} else {
+		d.where = p
+	}
+	return d
+}
+
+// FromSelect makes it possible to delete rows that match the sub-query.
+func (d *DeleteBuilder) FromSelect(s *Selector) *DeleteBuilder {
+	d.Where(s.where)
+	if table, _ := s.from.(*SelectTable); table != nil {
+		d.table = table.name
+	}
+	return d
+}
+
+// Query returns query representation of a `DELETE` statement.
+func (d *DeleteBuilder) Query() (string, []interface{}) {
+	d.WriteString("DELETE FROM ")
+	d.writeSchema(d.schema)
+	d.Ident(d.table)
+	if d.where != nil {
+		d.WriteString(" WHERE ")
+		d.Join(d.where)
+	}
+	return d.String(), d.args
+}
+
+// Predicate is a where predicate.
+type Predicate struct {
+	Builder
+	depth int
+	fns   []func(*Builder)
+}
+
+// P creates a new predicate.
+//
+//	P().EQ("name", "a8m").And().EQ("age", 30)
+//
+func P(fns ...func(*Builder)) *Predicate {
+	return &Predicate{fns: fns}
+}
+
+// ExprP creates a new predicate from the given expression.
+//
+//	ExprP("A = ? AND B > ?", args...)
+//
+func ExprP(exr string, args ...interface{}) *Predicate {
+	return P(func(b *Builder) {
+		b.Join(Expr(exr, args...))
+	})
+}
+
+// Or combines all given predicates with OR between them.
+//
+//	Or(EQ("name", "foo"), EQ("name", "bar"))
+//
+func Or(preds ...*Predicate) *Predicate {
+	p := P()
+	return p.Append(func(b *Builder) {
+		p.mayWrap(preds, b, "OR")
+	})
+}
+
+// False appends the FALSE keyword to the predicate.
+//
+//	Delete("users").Where(False())
+//
+func False() *Predicate {
+	return P().False()
+}
+
+// False appends FALSE to the predicate.
+func (p *Predicate) False() *Predicate {
+	return p.Append(func(b *Builder) {
+		b.WriteString("FALSE")
+	})
+}
+
+// Not wraps the given predicate with the not predicate.
+//
+//	Not(Or(EQ("name", "foo"), EQ("name", "bar")))
+//
+func Not(pred *Predicate) *Predicate {
+	return P().Not().Append(func(b *Builder) {
+		b.Nested(func(b *Builder) {
+			b.Join(pred)
+		})
+	})
+}
+
+// Not appends NOT to the predicate.
+func (p *Predicate) Not() *Predicate {
+	return p.Append(func(b *Builder) {
+		b.WriteString("NOT ")
+	})
+}
+
+// ColumnsOp returns a new predicate between 2 columns.
+func ColumnsOp(col1, col2 string, op Op) *Predicate {
+	return P().ColumnsOp(col1, col2, op)
+}
+
+// ColumnsOp appends the given predicate between 2 columns.
+func (p *Predicate) ColumnsOp(col1, col2 string, op Op) *Predicate {
+	return p.Append(func(b *Builder) {
+		b.Ident(col1)
+		b.WriteOp(op)
+		b.Ident(col2)
+	})
+}
+
+// And combines all given predicates with AND between them.
+func And(preds ...*Predicate) *Predicate {
+	p := P()
+	return p.Append(func(b *Builder) {
+		p.mayWrap(preds, b, "AND")
+	})
+}
+
+// IsTrue appends a predicate that checks if the column value is truthy.
+func IsTrue(col string) *Predicate {
+	return P().IsTrue(col)
+}
+
+// IsTrue appends a predicate that checks if the column value is truthy.
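+// For example (sketch): IsTrue("active") renders simply as "active",
+// relying on the column being a boolean.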
+func (p *Predicate) IsTrue(col string) *Predicate { + return p.Append(func(b *Builder) { + b.Ident(col) + }) +} + +// IsFalse appends a predicate that checks if the column value is falsey. +func IsFalse(col string) *Predicate { + return P().IsFalse(col) +} + +// IsFalse appends a predicate that checks if the column value is falsey. +func (p *Predicate) IsFalse(col string) *Predicate { + return p.Append(func(b *Builder) { + b.WriteString("NOT ").Ident(col) + }) +} + +// EQ returns a "=" predicate. +func EQ(col string, value interface{}) *Predicate { + return P().EQ(col, value) +} + +// EQ appends a "=" predicate. +func (p *Predicate) EQ(col string, arg interface{}) *Predicate { + // A small optimization to avoid passing + // arguments when it can be avoided. + switch arg := arg.(type) { + case bool: + if arg { + return IsTrue(col) + } + return IsFalse(col) + default: + return p.Append(func(b *Builder) { + b.Ident(col) + b.WriteOp(OpEQ) + p.arg(b, arg) + }) + } +} + +// ColumnsEQ appends a "=" predicate between 2 columns. +func ColumnsEQ(col1, col2 string) *Predicate { + return P().ColumnsEQ(col1, col2) +} + +// ColumnsEQ appends a "=" predicate between 2 columns. +func (p *Predicate) ColumnsEQ(col1, col2 string) *Predicate { + return p.ColumnsOp(col1, col2, OpEQ) +} + +// NEQ returns a "<>" predicate. +func NEQ(col string, value interface{}) *Predicate { + return P().NEQ(col, value) +} + +// NEQ appends a "<>" predicate. +func (p *Predicate) NEQ(col string, arg interface{}) *Predicate { + // A small optimization to avoid passing + // arguments when it can be avoided. + switch arg := arg.(type) { + case bool: + if arg { + return IsFalse(col) + } + return IsTrue(col) + default: + return p.Append(func(b *Builder) { + b.Ident(col) + b.WriteOp(OpNEQ) + p.arg(b, arg) + }) + } +} + +// ColumnsNEQ appends a "<>" predicate between 2 columns. +func ColumnsNEQ(col1, col2 string) *Predicate { + return P().ColumnsNEQ(col1, col2) +} + +// ColumnsNEQ appends a "<>" predicate between 2 columns. +func (p *Predicate) ColumnsNEQ(col1, col2 string) *Predicate { + return p.ColumnsOp(col1, col2, OpNEQ) +} + +// LT returns a "<" predicate. +func LT(col string, value interface{}) *Predicate { + return P().LT(col, value) +} + +// LT appends a "<" predicate. +func (p *Predicate) LT(col string, arg interface{}) *Predicate { + return p.Append(func(b *Builder) { + b.Ident(col) + p.WriteOp(OpLT) + p.arg(b, arg) + }) +} + +// ColumnsLT appends a "<" predicate between 2 columns. +func ColumnsLT(col1, col2 string) *Predicate { + return P().ColumnsLT(col1, col2) +} + +// ColumnsLT appends a "<" predicate between 2 columns. +func (p *Predicate) ColumnsLT(col1, col2 string) *Predicate { + return p.ColumnsOp(col1, col2, OpLT) +} + +// LTE returns a "<=" predicate. +func LTE(col string, value interface{}) *Predicate { + return P().LTE(col, value) +} + +// LTE appends a "<=" predicate. +func (p *Predicate) LTE(col string, arg interface{}) *Predicate { + return p.Append(func(b *Builder) { + b.Ident(col) + p.WriteOp(OpLTE) + p.arg(b, arg) + }) +} + +// ColumnsLTE appends a "<=" predicate between 2 columns. +func ColumnsLTE(col1, col2 string) *Predicate { + return P().ColumnsLTE(col1, col2) +} + +// ColumnsLTE appends a "<=" predicate between 2 columns. +func (p *Predicate) ColumnsLTE(col1, col2 string) *Predicate { + return p.ColumnsOp(col1, col2, OpLTE) +} + +// GT returns a ">" predicate. +func GT(col string, value interface{}) *Predicate { + return P().GT(col, value) +} + +// GT appends a ">" predicate. 
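+// For example (sketch): GT("age", 18) renders roughly as "age" > ?
+// with 18 appended to the query arguments.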
+func (p *Predicate) GT(col string, arg interface{}) *Predicate { + return p.Append(func(b *Builder) { + b.Ident(col) + p.WriteOp(OpGT) + p.arg(b, arg) + }) +} + +// ColumnsGT appends a ">" predicate between 2 columns. +func ColumnsGT(col1, col2 string) *Predicate { + return P().ColumnsGT(col1, col2) +} + +// ColumnsGT appends a ">" predicate between 2 columns. +func (p *Predicate) ColumnsGT(col1, col2 string) *Predicate { + return p.ColumnsOp(col1, col2, OpGT) +} + +// GTE returns a ">=" predicate. +func GTE(col string, value interface{}) *Predicate { + return P().GTE(col, value) +} + +// GTE appends a ">=" predicate. +func (p *Predicate) GTE(col string, arg interface{}) *Predicate { + return p.Append(func(b *Builder) { + b.Ident(col) + p.WriteOp(OpGTE) + p.arg(b, arg) + }) +} + +// ColumnsGTE appends a ">=" predicate between 2 columns. +func ColumnsGTE(col1, col2 string) *Predicate { + return P().ColumnsGTE(col1, col2) +} + +// ColumnsGTE appends a ">=" predicate between 2 columns. +func (p *Predicate) ColumnsGTE(col1, col2 string) *Predicate { + return p.ColumnsOp(col1, col2, OpGTE) +} + +// NotNull returns the `IS NOT NULL` predicate. +func NotNull(col string) *Predicate { + return P().NotNull(col) +} + +// NotNull appends the `IS NOT NULL` predicate. +func (p *Predicate) NotNull(col string) *Predicate { + return p.Append(func(b *Builder) { + b.Ident(col).WriteString(" IS NOT NULL") + }) +} + +// IsNull returns the `IS NULL` predicate. +func IsNull(col string) *Predicate { + return P().IsNull(col) +} + +// IsNull appends the `IS NULL` predicate. +func (p *Predicate) IsNull(col string) *Predicate { + return p.Append(func(b *Builder) { + b.Ident(col).WriteString(" IS NULL") + }) +} + +// In returns the `IN` predicate. +func In(col string, args ...interface{}) *Predicate { + return P().In(col, args...) +} + +// In appends the `IN` predicate. +func (p *Predicate) In(col string, args ...interface{}) *Predicate { + // If no arguments were provided, append the FALSE constant, since + // we cannot apply "IN ()". This will make this predicate falsy. + if len(args) == 0 { + return p.False() + } + return p.Append(func(b *Builder) { + b.Ident(col).WriteOp(OpIn) + b.Nested(func(b *Builder) { + if s, ok := args[0].(*Selector); ok { + b.Join(s) + } else { + b.Args(args...) + } + }) + }) +} + +// InInts returns the `IN` predicate for ints. +func InInts(col string, args ...int) *Predicate { + return P().InInts(col, args...) +} + +// InValues adds the `IN` predicate for slice of driver.Value. +func InValues(col string, args ...driver.Value) *Predicate { + return P().InValues(col, args...) +} + +// InInts adds the `IN` predicate for ints. +func (p *Predicate) InInts(col string, args ...int) *Predicate { + iface := make([]interface{}, len(args)) + for i := range args { + iface[i] = args[i] + } + return p.In(col, iface...) +} + +// InValues adds the `IN` predicate for slice of driver.Value. +func (p *Predicate) InValues(col string, args ...driver.Value) *Predicate { + iface := make([]interface{}, len(args)) + for i := range args { + iface[i] = args[i] + } + return p.In(col, iface...) +} + +// NotIn returns the `Not IN` predicate. +func NotIn(col string, args ...interface{}) *Predicate { + return P().NotIn(col, args...) +} + +// NotIn appends the `Not IN` predicate. 
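+// For example (sketch): NotIn("id", 1, 2) renders roughly as "id" NOT IN (?, ?).
+// Note that, unlike In, an empty argument list leaves the predicate unchanged.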
+func (p *Predicate) NotIn(col string, args ...interface{}) *Predicate {
+	if len(args) == 0 {
+		return p
+	}
+	return p.Append(func(b *Builder) {
+		b.Ident(col).WriteOp(OpNotIn)
+		b.Nested(func(b *Builder) {
+			if s, ok := args[0].(*Selector); ok {
+				b.Join(s)
+			} else {
+				b.Args(args...)
+			}
+		})
+	})
+}
+
+// Exists returns the `Exists` predicate.
+func Exists(query Querier) *Predicate {
+	return P().Exists(query)
+}
+
+// Exists appends the `EXISTS` predicate with the given query.
+func (p *Predicate) Exists(query Querier) *Predicate {
+	return p.Append(func(b *Builder) {
+		b.WriteString("EXISTS ")
+		b.Nested(func(b *Builder) {
+			b.Join(query)
+		})
+	})
+}
+
+// NotExists returns the `NotExists` predicate.
+func NotExists(query Querier) *Predicate {
+	return P().NotExists(query)
+}
+
+// NotExists appends the `NOT EXISTS` predicate with the given query.
+func (p *Predicate) NotExists(query Querier) *Predicate {
+	return p.Append(func(b *Builder) {
+		b.WriteString("NOT EXISTS ")
+		b.Nested(func(b *Builder) {
+			b.Join(query)
+		})
+	})
+}
+
+// Like returns the `LIKE` predicate.
+func Like(col, pattern string) *Predicate {
+	return P().Like(col, pattern)
+}
+
+// Like appends the `LIKE` predicate.
+func (p *Predicate) Like(col, pattern string) *Predicate {
+	return p.Append(func(b *Builder) {
+		b.Ident(col).WriteOp(OpLike)
+		b.Arg(pattern)
+	})
+}
+
+// escape escapes w with the default escape character ('\'),
+// to be used by the pattern matching functions below.
+// The second return value indicates if w was escaped or not.
+func escape(w string) (string, bool) {
+	var n int
+	for i := range w {
+		if c := w[i]; c == '%' || c == '_' || c == '\\' {
+			n++
+		}
+	}
+	// No characters to escape.
+	if n == 0 {
+		return w, false
+	}
+	var b strings.Builder
+	b.Grow(len(w) + n)
+	for i := range w {
+		if c := w[i]; c == '%' || c == '_' || c == '\\' {
+			b.WriteByte('\\')
+		}
+		b.WriteByte(w[i])
+	}
+	return b.String(), true
+}
+
+func (p *Predicate) escapedLike(col, left, right, word string) *Predicate {
+	return p.Append(func(b *Builder) {
+		w, escaped := escape(word)
+		b.Ident(col).WriteOp(OpLike)
+		b.Arg(left + w + right)
+		if p.dialect == dialect.SQLite && escaped {
+			p.WriteString(" ESCAPE ").Arg("\\")
+		}
+	})
+}
+
+// HasPrefix is a helper predicate that checks prefix using the LIKE predicate.
+func HasPrefix(col, prefix string) *Predicate {
+	return P().HasPrefix(col, prefix)
+}
+
+// HasPrefix is a helper predicate that checks prefix using the LIKE predicate.
+func (p *Predicate) HasPrefix(col, prefix string) *Predicate {
+	return p.escapedLike(col, "", "%", prefix)
+}
+
+// HasSuffix is a helper predicate that checks suffix using the LIKE predicate.
+func HasSuffix(col, suffix string) *Predicate { return P().HasSuffix(col, suffix) }
+
+// HasSuffix is a helper predicate that checks suffix using the LIKE predicate.
+func (p *Predicate) HasSuffix(col, suffix string) *Predicate {
+	return p.escapedLike(col, "%", "", suffix)
+}
+
+// EqualFold is a helper predicate that applies the "=" predicate with case-folding.
+func EqualFold(col, sub string) *Predicate { return P().EqualFold(col, sub) }
+
+// EqualFold is a helper predicate that applies the "=" predicate with case-folding.
+func (p *Predicate) EqualFold(col, sub string) *Predicate {
+	return p.Append(func(b *Builder) {
+		f := &Func{}
+		f.SetDialect(b.dialect)
+		switch b.dialect {
+		case dialect.MySQL:
+			// We assume the CHARACTER SET is configured to utf8mb4,
+			// because this is how it is defined in dialect/sql/schema.
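+			// A case-insensitive collation makes the "=" comparison itself
+			// case-insensitive, so no LOWER() wrapping is needed here.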
+			b.Ident(col).WriteString(" COLLATE utf8mb4_general_ci = ")
+		case dialect.Postgres:
+			b.Ident(col).WriteString(" ILIKE ")
+		default: // SQLite.
+			f.Lower(col)
+			b.WriteString(f.String())
+			b.WriteOp(OpEQ)
+		}
+		b.Arg(strings.ToLower(sub))
+	})
+}
+
+// Contains is a helper predicate that checks substring using the LIKE predicate.
+func Contains(col, sub string) *Predicate { return P().Contains(col, sub) }
+
+// Contains is a helper predicate that checks substring using the LIKE predicate.
+func (p *Predicate) Contains(col, substr string) *Predicate {
+	return p.escapedLike(col, "%", "%", substr)
+}
+
+// ContainsFold is a helper predicate that checks substring using the LIKE predicate.
+func ContainsFold(col, sub string) *Predicate { return P().ContainsFold(col, sub) }
+
+// ContainsFold is a helper predicate that applies the LIKE predicate with case-folding.
+func (p *Predicate) ContainsFold(col, substr string) *Predicate {
+	return p.Append(func(b *Builder) {
+		w, escaped := escape(substr)
+		switch b.dialect {
+		case dialect.MySQL:
+			// We assume the CHARACTER SET is configured to utf8mb4,
+			// because this is how it is defined in dialect/sql/schema.
+			b.Ident(col).WriteString(" COLLATE utf8mb4_general_ci LIKE ")
+			b.Arg("%" + strings.ToLower(w) + "%")
+		case dialect.Postgres:
+			b.Ident(col).WriteString(" ILIKE ")
+			b.Arg("%" + strings.ToLower(w) + "%")
+		default: // SQLite.
+			var f Func
+			f.SetDialect(b.dialect)
+			f.Lower(col)
+			b.WriteString(f.String()).WriteString(" LIKE ")
+			b.Arg("%" + strings.ToLower(w) + "%")
+			if escaped {
+				p.WriteString(" ESCAPE ").Arg("\\")
+			}
+		}
+	})
+}
+
+// CompositeGT returns a composite ">" predicate.
+func CompositeGT(columns []string, args ...interface{}) *Predicate {
+	return P().CompositeGT(columns, args...)
+}
+
+// CompositeLT returns a composite "<" predicate.
+func CompositeLT(columns []string, args ...interface{}) *Predicate {
+	return P().CompositeLT(columns, args...)
+}
+
+func (p *Predicate) compositeP(operator string, columns []string, args ...interface{}) *Predicate {
+	return p.Append(func(b *Builder) {
+		b.Nested(func(nb *Builder) {
+			nb.IdentComma(columns...)
+		})
+		b.WriteString(operator)
+		b.WriteString("(")
+		b.Args(args...)
+		b.WriteString(")")
+	})
+}
+
+// CompositeGT returns a composite ">" predicate.
+func (p *Predicate) CompositeGT(columns []string, args ...interface{}) *Predicate {
+	const operator = " > "
+	return p.compositeP(operator, columns, args...)
+}
+
+// CompositeLT appends a composite "<" predicate.
+func (p *Predicate) CompositeLT(columns []string, args ...interface{}) *Predicate {
+	const operator = " < "
+	return p.compositeP(operator, columns, args...)
+}
+
+// Append appends a new function to the predicate callbacks.
+// The callback list is executed on call to Query.
+func (p *Predicate) Append(f func(*Builder)) *Predicate {
+	p.fns = append(p.fns, f)
+	return p
+}
+
+// Query returns query representation of a predicate.
+func (p *Predicate) Query() (string, []interface{}) {
+	if p.Len() > 0 || len(p.args) > 0 {
+		p.Reset()
+		p.args = nil
+	}
+	for _, f := range p.fns {
+		f(&p.Builder)
+	}
+	return p.String(), p.args
+}
+
+// arg calls Builder.Arg, but wraps `a` with parens in case of a Selector.
+func (*Predicate) arg(b *Builder, a interface{}) {
+	switch a.(type) {
+	case *Selector:
+		b.Nested(func(b *Builder) {
+			b.Arg(a)
+		})
+	default:
+		b.Arg(a)
+	}
+}
+
+// clone returns a shallow clone of p.
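+// Only the callback list is copied; the buffered SQL and arguments are not,
+// since Query resets them before rendering.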
+func (p *Predicate) clone() *Predicate {
+	if p == nil {
+		return p
+	}
+	return &Predicate{fns: append([]func(*Builder){}, p.fns...)}
+}
+
+func (p *Predicate) mayWrap(preds []*Predicate, b *Builder, op string) {
+	switch n := len(preds); {
+	case n == 1:
+		b.Join(preds[0])
+		return
+	case n > 1 && p.depth != 0:
+		b.WriteByte('(')
+		defer b.WriteByte(')')
+	}
+	for i := range preds {
+		preds[i].depth = p.depth + 1
+		if i > 0 {
+			b.WriteByte(' ')
+			b.WriteString(op)
+			b.WriteByte(' ')
+		}
+		if len(preds[i].fns) > 1 {
+			b.Nested(func(b *Builder) {
+				b.Join(preds[i])
+			})
+		} else {
+			b.Join(preds[i])
+		}
+	}
+}
+
+// Func represents an SQL function.
+type Func struct {
+	Builder
+	fns []func(*Builder)
+}
+
+// Lower wraps the given column with the LOWER function.
+//
+//	P().EQ(sql.Lower("name"), "a8m")
+//
+func Lower(ident string) string {
+	f := &Func{}
+	f.Lower(ident)
+	return f.String()
+}
+
+// Lower wraps the given ident with the LOWER function.
+func (f *Func) Lower(ident string) {
+	f.byName("LOWER", ident)
+}
+
+// Count wraps the ident with the COUNT aggregation function.
+func Count(ident string) string {
+	f := &Func{}
+	f.Count(ident)
+	return f.String()
+}
+
+// Count wraps the ident with the COUNT aggregation function.
+func (f *Func) Count(ident string) {
+	f.byName("COUNT", ident)
+}
+
+// Max wraps the ident with the MAX aggregation function.
+func Max(ident string) string {
+	f := &Func{}
+	f.Max(ident)
+	return f.String()
+}
+
+// Max wraps the ident with the MAX aggregation function.
+func (f *Func) Max(ident string) {
+	f.byName("MAX", ident)
+}
+
+// Min wraps the ident with the MIN aggregation function.
+func Min(ident string) string {
+	f := &Func{}
+	f.Min(ident)
+	return f.String()
+}
+
+// Min wraps the ident with the MIN aggregation function.
+func (f *Func) Min(ident string) {
+	f.byName("MIN", ident)
+}
+
+// Sum wraps the ident with the SUM aggregation function.
+func Sum(ident string) string {
+	f := &Func{}
+	f.Sum(ident)
+	return f.String()
+}
+
+// Sum wraps the ident with the SUM aggregation function.
+func (f *Func) Sum(ident string) {
+	f.byName("SUM", ident)
+}
+
+// Avg wraps the ident with the AVG aggregation function.
+func Avg(ident string) string {
+	f := &Func{}
+	f.Avg(ident)
+	return f.String()
+}
+
+// Avg wraps the ident with the AVG aggregation function.
+func (f *Func) Avg(ident string) {
+	f.byName("AVG", ident)
+}
+
+// byName wraps an identifier with a function name.
+func (f *Func) byName(fn, ident string) {
+	f.Append(func(b *Builder) {
+		f.WriteString(fn)
+		f.Nested(func(b *Builder) {
+			b.Ident(ident)
+		})
+	})
+}
+
+// Append appends a new function to the function callbacks.
+// The callback list is executed on call to String.
+func (f *Func) Append(fn func(*Builder)) *Func {
+	f.fns = append(f.fns, fn)
+	return f
+}
+
+// String implements the fmt.Stringer.
+func (f *Func) String() string {
+	for _, fn := range f.fns {
+		fn(&f.Builder)
+	}
+	return f.Builder.String()
+}
+
+// As suffixes the given column with an alias (`a` AS `b`).
+func As(ident string, as string) string {
+	b := &Builder{}
+	b.fromIdent(ident)
+	b.Ident(ident).Pad().WriteString("AS")
+	b.Pad().Ident(as)
+	return b.String()
+}
+
+// Distinct prefixes the given columns with the `DISTINCT` keyword (DISTINCT `id`).
+func Distinct(idents ...string) string {
+	b := &Builder{}
+	if len(idents) > 0 {
+		b.fromIdent(idents[0])
+	}
+	b.WriteString("DISTINCT")
+	b.Pad().IdentComma(idents...)
+	return b.String()
+}
+
+// TableView is a view that returns a table view.
+// Can be a Table, Selector or a View (WITH statement).
+type TableView interface {
+	view()
+}
+
+// SelectTable is a table selector.
+type SelectTable struct {
+	Builder
+	as     string
+	name   string
+	schema string
+	quote  bool
+}
+
+// Table returns a new table selector.
+//
+//	t1 := Table("users").As("u")
+//	return Select(t1.C("name"))
+//
+func Table(name string) *SelectTable {
+	return &SelectTable{quote: true, name: name}
+}
+
+// Schema sets the schema name of the table.
+func (s *SelectTable) Schema(name string) *SelectTable {
+	s.schema = name
+	return s
+}
+
+// As adds the AS clause to the table selector.
+func (s *SelectTable) As(alias string) *SelectTable {
+	s.as = alias
+	return s
+}
+
+// C returns a formatted string for the table column.
+func (s *SelectTable) C(column string) string {
+	name := s.name
+	if s.as != "" {
+		name = s.as
+	}
+	b := &Builder{dialect: s.dialect}
+	if s.as == "" {
+		b.writeSchema(s.schema)
+	}
+	b.Ident(name).WriteByte('.').Ident(column)
+	return b.String()
+}
+
+// Columns returns a list of formatted strings for the table columns.
+func (s *SelectTable) Columns(columns ...string) []string {
+	names := make([]string, 0, len(columns))
+	for _, c := range columns {
+		names = append(names, s.C(c))
+	}
+	return names
+}
+
+// Unquote makes the table name to be formatted as a raw string (unquoted).
+// It is useful when you don't want to query tables under the current database.
+// For example: "INFORMATION_SCHEMA.TABLE_CONSTRAINTS" in MySQL.
+func (s *SelectTable) Unquote() *SelectTable {
+	s.quote = false
+	return s
+}
+
+// ref returns the table reference.
+func (s *SelectTable) ref() string {
+	if !s.quote {
+		return s.name
+	}
+	b := &Builder{dialect: s.dialect}
+	b.writeSchema(s.schema)
+	b.Ident(s.name)
+	if s.as != "" {
+		b.WriteString(" AS ")
+		b.Ident(s.as)
+	}
+	return b.String()
+}
+
+// implement the table view.
+func (*SelectTable) view() {}
+
+// join table option.
+type join struct {
+	on    *Predicate
+	kind  string
+	table TableView
+}
+
+// clone a joiner.
+func (j join) clone() join {
+	if sel, ok := j.table.(*Selector); ok {
+		j.table = sel.Clone()
+	}
+	j.on = j.on.clone()
+	return j
+}
+
+// Selector is a builder for the `SELECT` statement.
+type Selector struct {
+	Builder
+	// ctx stores contextual data typically from
+	// generated code such as alternate table schemas.
+	ctx       context.Context
+	as        string
+	selection []interface{}
+	from      TableView
+	joins     []join
+	where     *Predicate
+	or        bool
+	not       bool
+	order     []interface{}
+	group     []string
+	having    *Predicate
+	limit     *int
+	offset    *int
+	distinct  bool
+	union     []union
+	prefix    Queries
+	lock      *LockOptions
+}
+
+// WithContext sets the context into the *Selector.
+func (s *Selector) WithContext(ctx context.Context) *Selector {
+	if ctx == nil {
+		panic("nil context")
+	}
+	s.ctx = ctx
+	return s
+}
+
+// Context returns the Selector context or Background
+// if nil.
+func (s *Selector) Context() context.Context {
+	if s.ctx != nil {
+		return s.ctx
+	}
+	return context.Background()
+}
+
+// Select returns a new selector for the `SELECT` statement.
+//
+//	t1 := Table("users").As("u")
+//	t2 := Select().From(Table("groups")).Where(EQ("user_id", 10)).As("g")
+//	return Select(t1.C("id"), t2.C("name")).
+//		From(t1).
+//		Join(t2).
+//		On(t1.C("id"), t2.C("user_id"))
+//
+func Select(columns ...string) *Selector {
+	return (&Selector{}).Select(columns...)
+}
+
+// SelectExpr is like Select, but supports passing arbitrary
+// expressions for SELECT clause.
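+// For example, a sketch (the expression is illustrative):
+//
+//	SelectExpr(Expr("COUNT(*)")).From(Table("users"))
+//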
+func SelectExpr(exprs ...Querier) *Selector {
+	return (&Selector{}).SelectExpr(exprs...)
+}
+
+// Select changes the columns selection of the SELECT statement.
+// Empty selection means all columns *.
+func (s *Selector) Select(columns ...string) *Selector {
+	s.selection = make([]interface{}, len(columns))
+	for i := range columns {
+		s.selection[i] = columns[i]
+	}
+	return s
+}
+
+// AppendSelect appends additional columns to the SELECT statement.
+func (s *Selector) AppendSelect(columns ...string) *Selector {
+	for i := range columns {
+		s.selection = append(s.selection, columns[i])
+	}
+	return s
+}
+
+// SelectExpr changes the columns selection of the SELECT statement
+// with custom list of expressions.
+func (s *Selector) SelectExpr(exprs ...Querier) *Selector {
+	s.selection = make([]interface{}, len(exprs))
+	for i := range exprs {
+		s.selection[i] = exprs[i]
+	}
+	return s
+}
+
+// AppendSelectExpr appends additional expressions to the SELECT statement.
+func (s *Selector) AppendSelectExpr(exprs ...Querier) *Selector {
+	for i := range exprs {
+		s.selection = append(s.selection, exprs[i])
+	}
+	return s
+}
+
+// AppendSelectExprAs appends additional expressions to the SELECT statement with the given name.
+func (s *Selector) AppendSelectExprAs(expr Querier, as string) *Selector {
+	s.selection = append(s.selection, ExprFunc(func(b *Builder) {
+		b.WriteByte('(')
+		b.Join(expr)
+		b.WriteString(") AS ")
+		b.Ident(as)
+	}))
+	return s
+}
+
+// SelectedColumns returns the selected columns in the Selector.
+func (s *Selector) SelectedColumns() []string {
+	columns := make([]string, 0, len(s.selection))
+	for i := range s.selection {
+		if c, ok := s.selection[i].(string); ok {
+			columns = append(columns, c)
+		}
+	}
+	return columns
+}
+
+// UnqualifiedColumns returns an unqualified version of the
+// selected columns in the Selector. e.g. "t1"."c" => "c".
+func (s *Selector) UnqualifiedColumns() []string {
+	columns := make([]string, 0, len(s.selection))
+	for i := range s.selection {
+		c, ok := s.selection[i].(string)
+		if !ok {
+			continue
+		}
+		if s.isIdent(c) {
+			parts := strings.FieldsFunc(c, func(r rune) bool {
+				return r == '`' || r == '"'
+			})
+			if n := len(parts); n > 0 && parts[n-1] != "" {
+				c = parts[n-1]
+			}
+		}
+		columns = append(columns, c)
+	}
+	return columns
+}
+
+// From sets the source of `FROM` clause.
+func (s *Selector) From(t TableView) *Selector {
+	s.from = t
+	if st, ok := t.(state); ok {
+		st.SetDialect(s.dialect)
+	}
+	return s
+}
+
+// Distinct adds the DISTINCT keyword to the `SELECT` statement.
+func (s *Selector) Distinct() *Selector {
+	s.distinct = true
+	return s
+}
+
+// SetDistinct sets explicitly if the returned rows are distinct or indistinct.
+func (s *Selector) SetDistinct(v bool) *Selector {
+	s.distinct = v
+	return s
+}
+
+// Limit adds the `LIMIT` clause to the `SELECT` statement.
+func (s *Selector) Limit(limit int) *Selector {
+	s.limit = &limit
+	return s
+}
+
+// Offset adds the `OFFSET` clause to the `SELECT` statement.
+func (s *Selector) Offset(offset int) *Selector {
+	s.offset = &offset
+	return s
+}
+
+// Where sets or appends the given predicate to the statement.
+func (s *Selector) Where(p *Predicate) *Selector {
+	if s.not {
+		p = Not(p)
+		s.not = false
+	}
+	switch {
+	case s.where == nil:
+		s.where = p
+	case s.where != nil && s.or:
+		s.where = Or(s.where, p)
+		s.or = false
+	default:
+		s.where = And(s.where, p)
+	}
+	return s
+}
+
+// P returns the predicate of a selector.
+func (s *Selector) P() *Predicate {
+	return s.where
+}
+
+// SetP sets explicitly the predicate function for the selector and clears its previous state.
+func (s *Selector) SetP(p *Predicate) *Selector {
+	s.where = p
+	s.or = false
+	s.not = false
+	return s
+}
+
+// FromSelect copies the predicate from a selector.
+func (s *Selector) FromSelect(s2 *Selector) *Selector {
+	s.where = s2.where
+	return s
+}
+
+// Not sets the next coming predicate with not.
+func (s *Selector) Not() *Selector {
+	s.not = true
+	return s
+}
+
+// Or sets the next coming predicate with OR operator (disjunction).
+func (s *Selector) Or() *Selector {
+	s.or = true
+	return s
+}
+
+// Table returns the selected table.
+func (s *Selector) Table() *SelectTable {
+	return s.from.(*SelectTable)
+}
+
+// TableName returns the name of the selected table or alias of selector.
+func (s *Selector) TableName() string {
+	switch view := s.from.(type) {
+	case *SelectTable:
+		return view.name
+	case *Selector:
+		return view.as
+	default:
+		panic(fmt.Sprintf("unhandled TableView type %T", s.from))
+	}
+}
+
+// Join appends a `JOIN` clause to the statement.
+func (s *Selector) Join(t TableView) *Selector {
+	return s.join("JOIN", t)
+}
+
+// LeftJoin appends a `LEFT JOIN` clause to the statement.
+func (s *Selector) LeftJoin(t TableView) *Selector {
+	return s.join("LEFT JOIN", t)
+}
+
+// RightJoin appends a `RIGHT JOIN` clause to the statement.
+func (s *Selector) RightJoin(t TableView) *Selector {
+	return s.join("RIGHT JOIN", t)
+}
+
+// join adds a join table to the selector with the given kind.
+func (s *Selector) join(kind string, t TableView) *Selector {
+	s.joins = append(s.joins, join{
+		kind:  kind,
+		table: t,
+	})
+	switch view := t.(type) {
+	case *SelectTable:
+		if view.as == "" {
+			view.as = "t" + strconv.Itoa(len(s.joins))
+		}
+	case *Selector:
+		if view.as == "" {
+			view.as = "t" + strconv.Itoa(len(s.joins))
+		}
+	}
+	if st, ok := t.(state); ok {
+		st.SetDialect(s.dialect)
+	}
+	return s
+}
+
+// unionType describes a UNION type.
+type unionType string
+
+const (
+	unionAll      unionType = "ALL"
+	unionDistinct unionType = "DISTINCT"
+)
+
+// union query option.
+type union struct {
+	unionType
+	TableView
+}
+
+// Union appends the UNION clause to the query.
+func (s *Selector) Union(t TableView) *Selector {
+	s.union = append(s.union, union{
+		TableView: t,
+	})
+	return s
+}
+
+// UnionAll appends the UNION ALL clause to the query.
+func (s *Selector) UnionAll(t TableView) *Selector {
+	s.union = append(s.union, union{
+		unionType: unionAll,
+		TableView: t,
+	})
+	return s
+}
+
+// UnionDistinct appends the UNION DISTINCT clause to the query.
+func (s *Selector) UnionDistinct(t TableView) *Selector {
+	s.union = append(s.union, union{
+		unionType: unionDistinct,
+		TableView: t,
+	})
+	return s
+}
+
+// Prefix prefixes the query with list of queries.
+func (s *Selector) Prefix(queries ...Querier) *Selector {
+	s.prefix = append(s.prefix, queries...)
+	return s
+}
+
+// C returns a formatted string for a selected column from this statement.
+func (s *Selector) C(column string) string {
+	if s.as != "" {
+		b := &Builder{dialect: s.dialect}
+		b.Ident(s.as)
+		b.WriteByte('.')
+		b.Ident(column)
+		return b.String()
+	}
+	return s.Table().C(column)
+}
+
+// Columns returns a list of formatted strings for the selected columns from this statement.
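+// For example (sketch): on a selector aliased with As("u"),
+// Columns("id", "name") yields roughly "u"."id" and "u"."name".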
+func (s *Selector) Columns(columns ...string) []string {
+	names := make([]string, 0, len(columns))
+	for _, c := range columns {
+		names = append(names, s.C(c))
+	}
+	return names
+}
+
+// OnP sets or appends the given predicate for the `ON` clause of the statement.
+func (s *Selector) OnP(p *Predicate) *Selector {
+	if len(s.joins) > 0 {
+		join := &s.joins[len(s.joins)-1]
+		switch {
+		case join.on == nil:
+			join.on = p
+		default:
+			join.on = And(join.on, p)
+		}
+	}
+	return s
+}
+
+// On sets the `ON` clause for the `JOIN` operation.
+func (s *Selector) On(c1, c2 string) *Selector {
+	s.OnP(P(func(builder *Builder) {
+		builder.Ident(c1).WriteOp(OpEQ).Ident(c2)
+	}))
+	return s
+}
+
+// As gives this selection an alias.
+func (s *Selector) As(alias string) *Selector {
+	s.as = alias
+	return s
+}
+
+// Count sets the Select statement to be a `SELECT COUNT(*)`.
+func (s *Selector) Count(columns ...string) *Selector {
+	column := "*"
+	if len(columns) > 0 {
+		b := &Builder{}
+		b.IdentComma(columns...)
+		column = b.String()
+	}
+	s.Select(Count(column))
+	return s
+}
+
+// LockAction tells the transaction what to do in case of
+// requesting a row that is locked by another transaction.
+type LockAction string
+
+const (
+	// NoWait means never wait and returns an error.
+	NoWait LockAction = "NOWAIT"
+	// SkipLocked means never wait and skip.
+	SkipLocked LockAction = "SKIP LOCKED"
+)
+
+// LockStrength defines the strength of the lock (see the list below).
+type LockStrength string
+
+// A list of all locking clauses.
+const (
+	LockShare       LockStrength = "SHARE"
+	LockUpdate      LockStrength = "UPDATE"
+	LockNoKeyUpdate LockStrength = "NO KEY UPDATE"
+	LockKeyShare    LockStrength = "KEY SHARE"
+)
+
+type (
+	// LockOptions defines a SELECT statement
+	// lock for protecting concurrent updates.
+	LockOptions struct {
+		// Strength of the lock.
+		Strength LockStrength
+		// Action of the lock.
+		Action LockAction
+		// Tables are optional tables to lock.
+		Tables []string
+		// custom clause for locking.
+		clause string
+	}
+	// LockOption allows configuring the LockConfig using functional options.
+	LockOption func(*LockOptions)
+)
+
+// WithLockAction sets the Action of the lock.
+func WithLockAction(action LockAction) LockOption {
+	return func(c *LockOptions) {
+		c.Action = action
+	}
+}
+
+// WithLockTables sets the Tables of the lock.
+func WithLockTables(tables ...string) LockOption {
+	return func(c *LockOptions) {
+		c.Tables = tables
+	}
+}
+
+// WithLockClause allows providing a custom clause for
+// locking the statement. For example, in MySQL <= 8.22:
+//
+//	Select().
+//		From(Table("users")).
+//		ForShare(
+//			WithLockClause("LOCK IN SHARE MODE"),
+//		)
+//
+func WithLockClause(clause string) LockOption {
+	return func(c *LockOptions) {
+		c.clause = clause
+	}
+}
+
+// For sets the lock configuration for suffixing the `SELECT`
+// statement with the `FOR [SHARE | UPDATE] ...` clause.
+func (s *Selector) For(l LockStrength, opts ...LockOption) *Selector {
+	if s.Dialect() == dialect.SQLite {
+		s.AddError(errors.New("sql: SELECT .. FOR UPDATE/SHARE not supported in SQLite"))
+	}
+	s.lock = &LockOptions{Strength: l}
+	for _, opt := range opts {
+		opt(s.lock)
+	}
+	return s
+}
+
+// ForShare sets the lock configuration for suffixing the
+// `SELECT` statement with the `FOR SHARE` clause.
+func (s *Selector) ForShare(opts ...LockOption) *Selector {
+	return s.For(LockShare, opts...)
+}
+
+// ForUpdate sets the lock configuration for suffixing the
+// `SELECT` statement with the `FOR UPDATE` clause.
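+// For example, a sketch (the table and option are illustrative):
+//
+//	Select().
+//		From(Table("users")).
+//		ForUpdate(
+//			WithLockAction(NoWait),
+//		)
+//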
+func (s *Selector) ForUpdate(opts ...LockOption) *Selector { + return s.For(LockUpdate, opts...) +} + +// Clone returns a duplicate of the selector, including all associated steps. It can be +// used to prepare common SELECT statements and use them differently after the clone is made. +func (s *Selector) Clone() *Selector { + if s == nil { + return nil + } + joins := make([]join, len(s.joins)) + for i := range s.joins { + joins[i] = s.joins[i].clone() + } + return &Selector{ + Builder: s.Builder.clone(), + ctx: s.ctx, + as: s.as, + or: s.or, + not: s.not, + from: s.from, + limit: s.limit, + offset: s.offset, + distinct: s.distinct, + where: s.where.clone(), + having: s.having.clone(), + joins: append([]join{}, joins...), + group: append([]string{}, s.group...), + order: append([]interface{}{}, s.order...), + selection: append([]interface{}{}, s.selection...), + } +} + +// Asc adds the ASC suffix for the given column. +func Asc(column string) string { + b := &Builder{} + b.Ident(column).WriteString(" ASC") + return b.String() +} + +// Desc adds the DESC suffix for the given column. +func Desc(column string) string { + b := &Builder{} + b.Ident(column).WriteString(" DESC") + return b.String() +} + +// OrderBy appends the `ORDER BY` clause to the `SELECT` statement. +func (s *Selector) OrderBy(columns ...string) *Selector { + for i := range columns { + s.order = append(s.order, columns[i]) + } + return s +} + +// OrderColumns returns the ordered columns in the Selector. +// Note, this function skips columns selected with expressions. +func (s *Selector) OrderColumns() []string { + columns := make([]string, 0, len(s.order)) + for i := range s.order { + if c, ok := s.order[i].(string); ok { + columns = append(columns, c) + } + } + return columns +} + +// OrderExpr appends the `ORDER BY` clause to the `SELECT` +// statement with custom list of expressions. +func (s *Selector) OrderExpr(exprs ...Querier) *Selector { + for i := range exprs { + s.order = append(s.order, exprs[i]) + } + return s +} + +// GroupBy appends the `GROUP BY` clause to the `SELECT` statement. +func (s *Selector) GroupBy(columns ...string) *Selector { + s.group = append(s.group, columns...) + return s +} + +// Having appends a predicate for the `HAVING` clause. +func (s *Selector) Having(p *Predicate) *Selector { + s.having = p + return s +} + +// Query returns query representation of a `SELECT` statement. 
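+// For example (sketch): Select("name").From(Table("users")).Where(EQ("age", 30))
+// renders roughly as SELECT name FROM users WHERE age = ? with 30 as its argument
+// (identifier quoting depends on the configured dialect).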
+func (s *Selector) Query() (string, []interface{}) { + b := s.Builder.clone() + s.joinPrefix(&b) + b.WriteString("SELECT ") + if s.distinct { + b.WriteString("DISTINCT ") + } + if len(s.selection) > 0 { + s.joinSelect(&b) + } else { + b.WriteString("*") + } + switch t := s.from.(type) { + case *SelectTable: + b.WriteString(" FROM ") + t.SetDialect(s.dialect) + b.WriteString(t.ref()) + case *Selector: + b.WriteString(" FROM ") + t.SetDialect(s.dialect) + b.Nested(func(b *Builder) { + b.Join(t) + }) + b.WriteString(" AS ") + b.Ident(t.as) + case *WithBuilder: + b.WriteString(" FROM ") + t.SetDialect(s.dialect) + b.Ident(t.Name()) + } + for _, join := range s.joins { + b.WriteString(" " + join.kind + " ") + switch view := join.table.(type) { + case *SelectTable: + view.SetDialect(s.dialect) + b.WriteString(view.ref()) + case *Selector: + view.SetDialect(s.dialect) + b.Nested(func(b *Builder) { + b.Join(view) + }) + b.WriteString(" AS ") + b.Ident(view.as) + case *WithBuilder: + view.SetDialect(s.dialect) + b.Ident(view.Name()) + } + if join.on != nil { + b.WriteString(" ON ") + b.Join(join.on) + } + } + if s.where != nil { + b.WriteString(" WHERE ") + b.Join(s.where) + } + if len(s.group) > 0 { + b.WriteString(" GROUP BY ") + b.IdentComma(s.group...) + } + if s.having != nil { + b.WriteString(" HAVING ") + b.Join(s.having) + } + if len(s.union) > 0 { + s.joinUnion(&b) + } + if len(s.order) > 0 { + joinOrder(s.order, &b) + } + if s.limit != nil { + b.WriteString(" LIMIT ") + b.WriteString(strconv.Itoa(*s.limit)) + } + if s.offset != nil { + b.WriteString(" OFFSET ") + b.WriteString(strconv.Itoa(*s.offset)) + } + s.joinLock(&b) + s.total = b.total + s.AddError(b.Err()) + return b.String(), b.args +} + +func (s *Selector) joinPrefix(b *Builder) { + if len(s.prefix) > 0 { + b.join(s.prefix, " ") + b.Pad() + } +} + +func (s *Selector) joinLock(b *Builder) { + if s.lock == nil { + return + } + b.Pad() + if s.lock.clause != "" { + b.WriteString(s.lock.clause) + return + } + b.WriteString("FOR ").WriteString(string(s.lock.Strength)) + if len(s.lock.Tables) > 0 { + b.WriteString(" OF ").IdentComma(s.lock.Tables...) + } + if s.lock.Action != "" { + b.Pad().WriteString(string(s.lock.Action)) + } +} + +func (s *Selector) joinUnion(b *Builder) { + for _, union := range s.union { + b.WriteString(" UNION ") + if union.unionType != "" { + b.WriteString(string(union.unionType) + " ") + } + switch view := union.TableView.(type) { + case *SelectTable: + view.SetDialect(s.dialect) + b.WriteString(view.ref()) + case *Selector: + view.SetDialect(s.dialect) + b.Join(view) + if view.as != "" { + b.WriteString(" AS ") + b.Ident(view.as) + } + } + } +} + +func joinOrder(order []interface{}, b *Builder) { + b.WriteString(" ORDER BY ") + for i := range order { + if i > 0 { + b.Comma() + } + switch r := order[i].(type) { + case string: + b.Ident(r) + case Querier: + b.Join(r) + } + } +} + +func (s *Selector) joinSelect(b *Builder) { + for i := range s.selection { + if i > 0 { + b.Comma() + } + switch s := s.selection[i].(type) { + case string: + b.Ident(s) + case Querier: + b.Join(s) + } + } +} + +// implement the table view interface. +func (*Selector) view() {} + +// WithBuilder is the builder for the `WITH` statement. +type WithBuilder struct { + Builder + recursive bool + ctes []struct { + name string + columns []string + s *Selector + } +} + +// With returns a new builder for the `WITH` statement. 
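`Query` assembles the clauses in a fixed order: FROM, JOIN/ON, WHERE, GROUP BY, HAVING, UNION, ORDER BY, LIMIT/OFFSET, and finally the lock clause. A short sketch of the join path, assuming the `Join` and `Limit` methods defined earlier on `Selector`:

```go
package main

import (
	"fmt"

	"entgo.io/ent/dialect"
	"entgo.io/ent/dialect/sql"
)

func main() {
	d := sql.Dialect(dialect.Postgres)
	users, pets := d.Table("users"), d.Table("pets")
	query, args := d.Select(users.C("name"), pets.C("name")).
		From(users).
		Join(pets).
		On(users.C("id"), pets.C("owner_id")).
		Limit(10).
		Query()
	// SELECT "users"."name", "pets"."name" FROM "users"
	// JOIN "pets" ON "users"."id" = "pets"."owner_id" LIMIT 10
	fmt.Println(query, args)
}
```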
+// +// n := Queries{ +// With("users_view").As(Select().From(Table("users"))), +// Select().From(Table("users_view")), +// } +// return n.Query() +// +func With(name string, columns ...string) *WithBuilder { + return &WithBuilder{ + ctes: []struct { + name string + columns []string + s *Selector + }{ + {name: name, columns: columns}, + }, + } +} + +// WithRecursive returns a new builder for the `WITH RECURSIVE` statement. +// +// n := Queries{ +// WithRecursive("users_view").As(Select().From(Table("users"))), +// Select().From(Table("users_view")), +// } +// return n.Query() +// +func WithRecursive(name string, columns ...string) *WithBuilder { + w := With(name, columns...) + w.recursive = true + return w +} + +// Name returns the name of the view. +func (w *WithBuilder) Name() string { + return w.ctes[0].name +} + +// As sets the view sub query. +func (w *WithBuilder) As(s *Selector) *WithBuilder { + w.ctes[len(w.ctes)-1].s = s + return w +} + +// With appends another named CTE to the statement. +func (w *WithBuilder) With(name string, columns ...string) *WithBuilder { + w.ctes = append(w.ctes, With(name, columns...).ctes...) + return w +} + +// C returns a formatted string for the WITH column. +func (w *WithBuilder) C(column string) string { + b := &Builder{dialect: w.dialect} + b.Ident(w.Name()).WriteByte('.').Ident(column) + return b.String() +} + +// Query returns query representation of a `WITH` clause. +func (w *WithBuilder) Query() (string, []interface{}) { + w.WriteString("WITH ") + if w.recursive { + w.WriteString("RECURSIVE ") + } + for i, cte := range w.ctes { + if i > 0 { + w.Comma() + } + w.Ident(cte.name) + if len(cte.columns) > 0 { + w.WriteByte('(') + w.IdentComma(cte.columns...) + w.WriteByte(')') + } + w.WriteString(" AS ") + w.Nested(func(b *Builder) { + b.Join(cte.s) + }) + } + return w.String(), w.args +} + +// implement the table view interface. +func (*WithBuilder) view() {} + +// WindowBuilder represents a builder for a window clause. +// Note that window functions support is limited and used +// only to query rows-limited edges in pagination. +type WindowBuilder struct { + Builder + fn string // e.g. ROW_NUMBER(), RANK(). + partition func(*Builder) + order []interface{} +} + +// RowNumber returns a new window clause with the ROW_NUMBER() as a function. +// Using this function will assign a each row a number, from 1 to N, in the +// order defined by the ORDER BY clause in the window spec. +func RowNumber() *WindowBuilder { + return &WindowBuilder{fn: "ROW_NUMBER"} +} + +// PartitionBy indicates to divide the query rows into groups by the given columns. +// Note that, standard SQL spec allows partition only by columns, and in order to +// use the "expression" version, use the PartitionByExpr. +func (w *WindowBuilder) PartitionBy(columns ...string) *WindowBuilder { + w.partition = func(b *Builder) { + b.IdentComma(columns...) + } + return w +} + +// PartitionExpr indicates to divide the query rows into groups by the given expression. +func (w *WindowBuilder) PartitionExpr(x Querier) *WindowBuilder { + w.partition = func(b *Builder) { + b.Join(x) + } + return w +} + +// OrderBy indicates how to sort rows in each partition. +func (w *WindowBuilder) OrderBy(columns ...string) *WindowBuilder { + for i := range columns { + w.order = append(w.order, columns[i]) + } + return w +} + +// OrderExpr appends the `ORDER BY` clause to the window +// partition with custom list of expressions. 
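A `WithBuilder` is usually paired with `Queries` (defined later in this file) so the CTE and its consumer render as a single statement. A sketch under that assumption:

```go
package main

import (
	"fmt"

	"entgo.io/ent/dialect"
	"entgo.io/ent/dialect/sql"
)

func main() {
	d := sql.Dialect(dialect.Postgres)
	view := d.With("users_view").
		As(d.Select().From(d.Table("users")))
	query, args := sql.Queries{
		view,
		d.Select().From(d.Table("users_view")),
	}.Query()
	// WITH "users_view" AS (SELECT * FROM "users") SELECT * FROM "users_view"
	fmt.Println(query, args)
}
```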
+func (w *WindowBuilder) OrderExpr(exprs ...Querier) *WindowBuilder { + for i := range exprs { + w.order = append(w.order, exprs[i]) + } + return w +} + +// Query returns query representation of the window function. +func (w *WindowBuilder) Query() (string, []interface{}) { + w.WriteString(w.fn) + w.WriteString("() OVER ") + w.Nested(func(b *Builder) { + if w.partition != nil { + b.WriteString("PARTITION BY ") + w.partition(b) + } + if w.order != nil { + joinOrder(w.order, b) + } + }) + return w.Builder.String(), w.args +} + +// Wrapper wraps a given Querier with different format. +// Used to prefix/suffix other queries. +type Wrapper struct { + format string + wrapped Querier +} + +// Query returns query representation of a wrapped Querier. +func (w *Wrapper) Query() (string, []interface{}) { + query, args := w.wrapped.Query() + return fmt.Sprintf(w.format, query), args +} + +// SetDialect calls SetDialect on the wrapped query. +func (w *Wrapper) SetDialect(name string) { + if s, ok := w.wrapped.(state); ok { + s.SetDialect(name) + } +} + +// Dialect calls Dialect on the wrapped query. +func (w *Wrapper) Dialect() string { + if s, ok := w.wrapped.(state); ok { + return s.Dialect() + } + return "" +} + +// Total returns the total number of arguments so far. +func (w *Wrapper) Total() int { + if s, ok := w.wrapped.(state); ok { + return s.Total() + } + return 0 +} + +// SetTotal sets the value of the total arguments. +// Used to pass this information between sub queries/expressions. +func (w *Wrapper) SetTotal(total int) { + if s, ok := w.wrapped.(state); ok { + s.SetTotal(total) + } +} + +// Raw returns a raw SQL query that is placed as-is in the query. +func Raw(s string) Querier { return &raw{s} } + +type raw struct{ s string } + +func (r *raw) Query() (string, []interface{}) { return r.s, nil } + +// Expr returns an SQL expression that implements the Querier interface. +func Expr(exr string, args ...interface{}) Querier { return &expr{s: exr, args: args} } + +type expr struct { + s string + args []interface{} +} + +func (e *expr) Query() (string, []interface{}) { return e.s, e.args } + +// ExprFunc returns an expression function that implements the Querier interface. +// +// Update("users"). +// Set("x", ExprFunc(func(b *Builder) { +// // The sql.Builder config (argc and dialect) +// // was set before the function was executed. +// b.Ident("x").WriteOp(OpAdd).Arg(1) +// })) +// +func ExprFunc(fn func(*Builder)) Querier { + return &exprFunc{fn: fn} +} + +type exprFunc struct { + Builder + fn func(*Builder) +} + +func (e *exprFunc) Query() (string, []interface{}) { + e.fn(&e.Builder) + return e.Builder.Query() +} + +// Queries are list of queries join with space between them. +type Queries []Querier + +// Query returns query representation of Queriers. +func (n Queries) Query() (string, []interface{}) { + b := &Builder{} + for i := range n { + if i > 0 { + b.Pad() + } + query, args := n[i].Query() + b.WriteString(query) + b.args = append(b.args, args...) + } + return b.String(), b.args +} + +// Builder is the base query builder for the sql dsl. +type Builder struct { + sb *strings.Builder // underlying builder. + dialect string // configured dialect. + args []interface{} // query parameters. + total int // total number of parameters in query tree. + errs []error // errors that added during the query construction. + qualifier string // qualifier to prefix identifiers (e.g. table name). +} + +// Quote quotes the given identifier with the characters based +// on the configured dialect. 
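A quick sketch of the window builder; no dialect is configured here, so identifiers fall back to the default backtick quoting:

```go
package main

import (
	"fmt"

	"entgo.io/ent/dialect/sql"
)

func main() {
	w := sql.RowNumber().PartitionBy("owner_id").OrderBy("id")
	query, args := w.Query()
	// ROW_NUMBER() OVER (PARTITION BY `owner_id` ORDER BY `id`)
	fmt.Println(query, args)
}
```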
It defaults to "`". +func (b *Builder) Quote(ident string) string { + quote := "`" + switch { + case b.postgres(): + // If it was quoted with the wrong + // identifier character. + if strings.Contains(ident, "`") { + return strings.ReplaceAll(ident, "`", `"`) + } + quote = `"` + // An identifier for unknown dialect. + case b.dialect == "" && strings.ContainsAny(ident, "`\""): + return ident + } + return quote + ident + quote +} + +// Ident appends the given string as an identifier. +func (b *Builder) Ident(s string) *Builder { + switch { + case len(s) == 0: + case s != "*" && !b.isIdent(s) && !isFunc(s) && !isModifier(s): + if b.qualifier != "" { + b.WriteString(b.Quote(b.qualifier)).WriteByte('.') + } + b.WriteString(b.Quote(s)) + case (isFunc(s) || isModifier(s)) && b.postgres(): + // Modifiers and aggregation functions that + // were called without dialect information. + b.WriteString(strings.ReplaceAll(s, "`", `"`)) + default: + b.WriteString(s) + } + return b +} + +// IdentComma calls Ident on all arguments and adds a comma between them. +func (b *Builder) IdentComma(s ...string) *Builder { + for i := range s { + if i > 0 { + b.Comma() + } + b.Ident(s[i]) + } + return b +} + +// String returns the accumulated string. +func (b *Builder) String() string { + if b.sb == nil { + return "" + } + return b.sb.String() +} + +// WriteByte wraps the Buffer.WriteByte to make it chainable with other methods. +func (b *Builder) WriteByte(c byte) *Builder { + if b.sb == nil { + b.sb = &strings.Builder{} + } + b.sb.WriteByte(c) + return b +} + +// WriteString wraps the Buffer.WriteString to make it chainable with other methods. +func (b *Builder) WriteString(s string) *Builder { + if b.sb == nil { + b.sb = &strings.Builder{} + } + b.sb.WriteString(s) + return b +} + +// Len returns the number of accumulated bytes. +func (b *Builder) Len() int { + if b.sb == nil { + return 0 + } + return b.sb.Len() +} + +// Reset resets the Builder to be empty. +func (b *Builder) Reset() *Builder { + if b.sb != nil { + b.sb.Reset() + } + return b +} + +// AddError appends an error to the builder errors. +func (b *Builder) AddError(err error) *Builder { + // allowed nil error make build process easier + if err != nil { + b.errs = append(b.errs, err) + } + return b +} + +func (b *Builder) writeSchema(schema string) { + if schema != "" && b.dialect != dialect.SQLite { + b.Ident(schema).WriteByte('.') + } +} + +// Err returns a concatenated error of all errors encountered during +// the query-building, or were added manually by calling AddError. +func (b *Builder) Err() error { + if len(b.errs) == 0 { + return nil + } + br := strings.Builder{} + for i := range b.errs { + if i > 0 { + br.WriteString("; ") + } + br.WriteString(b.errs[i].Error()) + } + return fmt.Errorf(br.String()) +} + +// An Op represents an operator. +type Op int + +const ( + // Predicate operators. + OpEQ Op = iota // = + OpNEQ // <> + OpGT // > + OpGTE // >= + OpLT // < + OpLTE // <= + OpIn // IN + OpNotIn // NOT IN + OpLike // LIKE + OpIsNull // IS NULL + OpNotNull // IS NOT NULL + + // Arithmetic operators. + OpAdd // + + OpSub // - + OpMul // * + OpDiv // / (Quotient) + OpMod // % (Reminder) +) + +var ops = [...]string{ + OpEQ: "=", + OpNEQ: "<>", + OpGT: ">", + OpGTE: ">=", + OpLT: "<", + OpLTE: "<=", + OpIn: "IN", + OpNotIn: "NOT IN", + OpLike: "LIKE", + OpIsNull: "IS NULL", + OpNotNull: "IS NOT NULL", + OpAdd: "+", + OpSub: "-", + OpMul: "*", + OpDiv: "/", + OpMod: "%", +} + +// WriteOp writes an operator to the builder. 
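The dialect sensitivity of `Quote` (and, by extension, `Ident`) is easiest to see directly. A small sketch:

```go
package main

import (
	"fmt"

	"entgo.io/ent/dialect"
	"entgo.io/ent/dialect/sql"
)

func main() {
	pg := &sql.Builder{}
	pg.SetDialect(dialect.Postgres)
	fmt.Println(pg.Quote("users")) // "users" (PostgreSQL uses double quotes)

	my := &sql.Builder{}
	my.SetDialect(dialect.MySQL)
	fmt.Println(my.Quote("users")) // `users` (the default backtick quoting)
}
```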
+func (b *Builder) WriteOp(op Op) *Builder { + switch { + case op >= OpEQ && op <= OpLike || op >= OpAdd && op <= OpMod: + b.Pad().WriteString(ops[op]).Pad() + case op == OpIsNull || op == OpNotNull: + b.Pad().WriteString(ops[op]) + default: + panic(fmt.Sprintf("invalid op %d", op)) + } + return b +} + +type ( + // StmtInfo holds an information regarding + // the statement + StmtInfo struct { + // The Dialect of the SQL driver. + Dialect string + } + // ParamFormatter wraps the FormatPram function. + ParamFormatter interface { + // The FormatParam function lets users to define + // custom placeholder formatting for their types. + // For example, formatting the default placeholder + // from '?' to 'ST_GeomFromWKB(?)' for MySQL dialect. + FormatParam(placeholder string, info *StmtInfo) string + } +) + +// Arg appends an input argument to the builder. +func (b *Builder) Arg(a interface{}) *Builder { + switch a := a.(type) { + case *raw: + b.WriteString(a.s) + return b + case Querier: + b.Join(a) + return b + } + b.total++ + b.args = append(b.args, a) + // Default placeholder param (MySQL and SQLite). + param := "?" + if b.postgres() { + // PostgreSQL arguments are referenced using the syntax $n. + // $1 refers to the 1st argument, $2 to the 2nd, and so on. + param = "$" + strconv.Itoa(b.total) + } + if f, ok := a.(ParamFormatter); ok { + param = f.FormatParam(param, &StmtInfo{ + Dialect: b.dialect, + }) + } + b.WriteString(param) + return b +} + +// Args appends a list of arguments to the builder. +func (b *Builder) Args(a ...interface{}) *Builder { + for i := range a { + if i > 0 { + b.Comma() + } + b.Arg(a[i]) + } + return b +} + +// Comma adds a comma to the query. +func (b *Builder) Comma() *Builder { + return b.WriteString(", ") +} + +// Pad adds a space to the query. +func (b *Builder) Pad() *Builder { + return b.WriteByte(' ') +} + +// Join joins a list of Queries to the builder. +func (b *Builder) Join(qs ...Querier) *Builder { + return b.join(qs, "") +} + +// JoinComma joins a list of Queries and adds comma between them. +func (b *Builder) JoinComma(qs ...Querier) *Builder { + return b.join(qs, ", ") +} + +// join joins a list of Queries to the builder with a given separator. +func (b *Builder) join(qs []Querier, sep string) *Builder { + for i, q := range qs { + if i > 0 { + b.WriteString(sep) + } + st, ok := q.(state) + if ok { + st.SetDialect(b.dialect) + st.SetTotal(b.total) + } + query, args := q.Query() + b.WriteString(query) + b.args = append(b.args, args...) + b.total += len(args) + if qe, ok := q.(querierErr); ok { + if err := qe.Err(); err != nil { + b.AddError(err) + } + } + } + return b +} + +// Nested gets a callback, and wraps its result with parentheses. +func (b *Builder) Nested(f func(*Builder)) *Builder { + nb := &Builder{dialect: b.dialect, total: b.total, sb: &strings.Builder{}} + nb.WriteByte('(') + f(nb) + nb.WriteByte(')') + b.WriteString(nb.String()) + b.args = append(b.args, nb.args...) + b.total = nb.total + return b +} + +// SetDialect sets the builder dialect. It's used for garnering dialect specific queries. +func (b *Builder) SetDialect(dialect string) { + b.dialect = dialect +} + +// Dialect returns the dialect of the builder. +func (b Builder) Dialect() string { + return b.dialect +} + +// Total returns the total number of arguments so far. +func (b Builder) Total() int { + return b.total +} + +// SetTotal sets the value of the total arguments. +// Used to pass this information between sub queries/expressions. 
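At the lowest level, `Ident`, `WriteOp`, and `Arg` can be chained by hand; `Builder.Query` (defined just below) then yields the accumulated text and the collected arguments:

```go
package main

import (
	"fmt"

	"entgo.io/ent/dialect/sql"
)

func main() {
	b := &sql.Builder{}
	// Build the expression piece by piece: identifier, operator, placeholder.
	b.Ident("age").WriteOp(sql.OpGTE).Arg(21)
	query, args := b.Query()
	fmt.Println(query, args) // `age` >= ? [21]
}
```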
+func (b *Builder) SetTotal(total int) { + b.total = total +} + +// Query implements the Querier interface. +func (b Builder) Query() (string, []interface{}) { + return b.String(), b.args +} + +// clone returns a shallow clone of a builder. +func (b Builder) clone() Builder { + c := Builder{dialect: b.dialect, total: b.total, sb: &strings.Builder{}} + if len(b.args) > 0 { + c.args = append(c.args, b.args...) + } + if b.sb != nil { + c.sb.WriteString(b.sb.String()) + } + return c +} + +// postgres reports if the builder dialect is PostgreSQL. +func (b Builder) postgres() bool { + return b.Dialect() == dialect.Postgres +} + +// mysql reports if the builder dialect is MySQL. +func (b Builder) mysql() bool { + return b.Dialect() == dialect.MySQL +} + +// fromIdent sets the builder dialect from the identifier format. +func (b *Builder) fromIdent(ident string) { + if strings.Contains(ident, `"`) { + b.SetDialect(dialect.Postgres) + } + // otherwise, use the default. +} + +// isIdent reports if the given string is a dialect identifier. +func (b *Builder) isIdent(s string) bool { + switch { + case b.postgres(): + return strings.Contains(s, `"`) + default: + return strings.Contains(s, "`") + } +} + +// state wraps the all methods for setting and getting +// update state between all queries in the query tree. +type state interface { + Dialect() string + SetDialect(string) + Total() int + SetTotal(int) +} + +// DialectBuilder prefixes all root builders with the `Dialect` constructor. +type DialectBuilder struct { + dialect string +} + +// Dialect creates a new DialectBuilder with the given dialect name. +func Dialect(name string) *DialectBuilder { + return &DialectBuilder{name} +} + +// Describe creates a DescribeBuilder for the configured dialect. +// +// Dialect(dialect.Postgres). +// Describe("users") +// +func (d *DialectBuilder) Describe(name string) *DescribeBuilder { + b := Describe(name) + b.SetDialect(d.dialect) + return b +} + +// CreateTable creates a TableBuilder for the configured dialect. +// +// Dialect(dialect.Postgres). +// CreateTable("users"). +// Columns( +// Column("id").Type("int").Attr("auto_increment"), +// Column("name").Type("varchar(255)"), +// ). +// PrimaryKey("id") +// +func (d *DialectBuilder) CreateTable(name string) *TableBuilder { + b := CreateTable(name) + b.SetDialect(d.dialect) + return b +} + +// AlterTable creates a TableAlter for the configured dialect. +// +// Dialect(dialect.Postgres). +// AlterTable("users"). +// AddColumn(Column("group_id").Type("int").Attr("UNIQUE")). +// AddForeignKey(ForeignKey().Columns("group_id"). +// Reference(Reference().Table("groups").Columns("id")). +// OnDelete("CASCADE"), +// ) +// +func (d *DialectBuilder) AlterTable(name string) *TableAlter { + b := AlterTable(name) + b.SetDialect(d.dialect) + return b +} + +// AlterIndex creates an IndexAlter for the configured dialect. +// +// Dialect(dialect.Postgres). +// AlterIndex("old"). +// Rename("new") +// +func (d *DialectBuilder) AlterIndex(name string) *IndexAlter { + b := AlterIndex(name) + b.SetDialect(d.dialect) + return b +} + +// Column creates a ColumnBuilder for the configured dialect. +// +// Dialect(dialect.Postgres).. +// Column("group_id").Type("int").Attr("UNIQUE") +// +func (d *DialectBuilder) Column(name string) *ColumnBuilder { + b := Column(name) + b.SetDialect(d.dialect) + return b +} + +// Insert creates a InsertBuilder for the configured dialect. +// +// Dialect(dialect.Postgres). 
+// Insert("users").Columns("age").Values(1) +// +func (d *DialectBuilder) Insert(table string) *InsertBuilder { + b := Insert(table) + b.SetDialect(d.dialect) + return b +} + +// Update creates a UpdateBuilder for the configured dialect. +// +// Dialect(dialect.Postgres). +// Update("users").Set("name", "foo") +// +func (d *DialectBuilder) Update(table string) *UpdateBuilder { + b := Update(table) + b.SetDialect(d.dialect) + return b +} + +// Delete creates a DeleteBuilder for the configured dialect. +// +// Dialect(dialect.Postgres). +// Delete().From("users") +// +func (d *DialectBuilder) Delete(table string) *DeleteBuilder { + b := Delete(table) + b.SetDialect(d.dialect) + return b +} + +// Select creates a Selector for the configured dialect. +// +// Dialect(dialect.Postgres). +// Select().From(Table("users")) +// +func (d *DialectBuilder) Select(columns ...string) *Selector { + b := Select(columns...) + b.SetDialect(d.dialect) + return b +} + +// SelectExpr is like Select, but supports passing arbitrary +// expressions for SELECT clause. +// +// Dialect(dialect.Postgres). +// SelectExpr(expr...). +// From(Table("users")) +// +func (d *DialectBuilder) SelectExpr(exprs ...Querier) *Selector { + b := SelectExpr(exprs...) + b.SetDialect(d.dialect) + return b +} + +// Table creates a SelectTable for the configured dialect. +// +// Dialect(dialect.Postgres). +// Table("users").As("u") +// +func (d *DialectBuilder) Table(name string) *SelectTable { + b := Table(name) + b.SetDialect(d.dialect) + return b +} + +// With creates a WithBuilder for the configured dialect. +// +// Dialect(dialect.Postgres). +// With("users_view"). +// As(Select().From(Table("users"))) +// +func (d *DialectBuilder) With(name string) *WithBuilder { + b := With(name) + b.SetDialect(d.dialect) + return b +} + +// CreateIndex creates a IndexBuilder for the configured dialect. +// +// Dialect(dialect.Postgres). +// CreateIndex("unique_name"). +// Unique(). +// Table("users"). +// Columns("first", "last") +// +func (d *DialectBuilder) CreateIndex(name string) *IndexBuilder { + b := CreateIndex(name) + b.SetDialect(d.dialect) + return b +} + +// DropIndex creates a DropIndexBuilder for the configured dialect. +// +// Dialect(dialect.Postgres). +// DropIndex("name") +// +func (d *DialectBuilder) DropIndex(name string) *DropIndexBuilder { + b := DropIndex(name) + b.SetDialect(d.dialect) + return b +} + +func isFunc(s string) bool { + return strings.Contains(s, "(") && strings.Contains(s, ")") +} + +func isModifier(s string) bool { + for _, m := range [...]string{"DISTINCT", "ALL", "WITH ROLLUP"} { + if strings.HasPrefix(s, m) { + return true + } + } + return false +} diff --git a/vendor/entgo.io/ent/dialect/sql/driver.go b/vendor/entgo.io/ent/dialect/sql/driver.go new file mode 100644 index 00000000..f7c09c63 --- /dev/null +++ b/vendor/entgo.io/ent/dialect/sql/driver.go @@ -0,0 +1,184 @@ +// Copyright 2019-present Facebook Inc. All rights reserved. +// This source code is licensed under the Apache 2.0 license found +// in the LICENSE file in the root directory of this source tree. + +package sql + +import ( + "context" + "database/sql" + "database/sql/driver" + "fmt" + "strings" + + "entgo.io/ent/dialect" +) + +// Driver is a dialect.Driver implementation for SQL based databases. +type Driver struct { + Conn + dialect string +} + +// NewDriver creates a new Driver with the given Conn and dialect. 
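The `DialectBuilder` factories above all follow the same shape: construct the underlying builder, stamp it with the dialect, and return it. A combined DDL/DML sketch mirroring the doc-comment examples; the exact rendering is dialect-dependent:

```go
package main

import (
	"fmt"

	"entgo.io/ent/dialect"
	"entgo.io/ent/dialect/sql"
)

func main() {
	d := sql.Dialect(dialect.MySQL)

	// DDL, mirroring the CreateTable doc comment above.
	ddl, _ := d.CreateTable("users").
		Columns(
			sql.Column("id").Type("int").Attr("auto_increment"),
			sql.Column("name").Type("varchar(255)"),
		).
		PrimaryKey("id").
		Query()
	fmt.Println(ddl)

	// DML, mirroring the Insert doc comment above.
	query, args := d.Insert("users").Columns("name").Values("a8m").Query()
	fmt.Println(query, args) // INSERT INTO `users` (`name`) VALUES (?) [a8m]
}
```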
+func NewDriver(dialect string, c Conn) *Driver { + return &Driver{dialect: dialect, Conn: c} +} + +// Open wraps the database/sql.Open method and returns a dialect.Driver that implements the ent/dialect.Driver interface. +func Open(dialect, source string) (*Driver, error) { + db, err := sql.Open(dialect, source) + if err != nil { + return nil, err + } + return NewDriver(dialect, Conn{db}), nil +} + +// OpenDB wraps the given database/sql.DB value with a Driver. +func OpenDB(dialect string, db *sql.DB) *Driver { + return NewDriver(dialect, Conn{db}) +} + +// DB returns the underlying *sql.DB instance. +func (d Driver) DB() *sql.DB { + return d.ExecQuerier.(*sql.DB) +} + +// Dialect implements the dialect.Dialect method. +func (d Driver) Dialect() string { + // If the underlying driver is wrapped with a telemetry driver. + for _, name := range []string{dialect.MySQL, dialect.SQLite, dialect.Postgres} { + if strings.HasPrefix(d.dialect, name) { + return name + } + } + return d.dialect +} + +// Tx starts and returns a transaction. +func (d *Driver) Tx(ctx context.Context) (dialect.Tx, error) { + return d.BeginTx(ctx, nil) +} + +// BeginTx starts a transaction with options. +func (d *Driver) BeginTx(ctx context.Context, opts *TxOptions) (dialect.Tx, error) { + tx, err := d.DB().BeginTx(ctx, opts) + if err != nil { + return nil, err + } + return &Tx{ + Conn: Conn{tx}, + Tx: tx, + }, nil +} + +// Close closes the underlying connection. +func (d *Driver) Close() error { return d.DB().Close() } + +// Tx implements the dialect.Tx interface. +type Tx struct { + Conn + driver.Tx +} + +// ExecQuerier wraps the standard Exec and Query methods. +type ExecQuerier interface { + ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error) + QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error) +} + +// Conn implements dialect.ExecQuerier given ExecQuerier. +type Conn struct { + ExecQuerier +} + +// Exec implements the dialect.Exec method. +func (c Conn) Exec(ctx context.Context, query string, args, v interface{}) error { + argv, ok := args.([]interface{}) + if !ok { + return fmt.Errorf("dialect/sql: invalid type %T. expect []interface{} for args", args) + } + switch v := v.(type) { + case nil: + if _, err := c.ExecContext(ctx, query, argv...); err != nil { + return err + } + case *sql.Result: + res, err := c.ExecContext(ctx, query, argv...) + if err != nil { + return err + } + *v = res + default: + return fmt.Errorf("dialect/sql: invalid type %T. expect *sql.Result", v) + } + return nil +} + +// Query implements the dialect.Query method. +func (c Conn) Query(ctx context.Context, query string, args, v interface{}) error { + vr, ok := v.(*Rows) + if !ok { + return fmt.Errorf("dialect/sql: invalid type %T. expect *sql.Rows", v) + } + argv, ok := args.([]interface{}) + if !ok { + return fmt.Errorf("dialect/sql: invalid type %T. expect []interface{} for args", args) + } + rows, err := c.QueryContext(ctx, query, argv...) + if err != nil { + return err + } + *vr = Rows{rows} + return nil +} + +var _ dialect.Driver = (*Driver)(nil) + +type ( + // Rows wraps sql.Rows to avoid copying its internal lock. + Rows struct{ ColumnScanner } + // Result is an alias to sql.Result. + Result = sql.Result + // NullBool is an alias to sql.NullBool. + NullBool = sql.NullBool + // NullInt64 is an alias to sql.NullInt64. + NullInt64 = sql.NullInt64 + // NullString is an alias to sql.NullString. + NullString = sql.NullString + // NullFloat64 is an alias to sql.NullFloat64.
+ NullFloat64 = sql.NullFloat64 + // NullTime represents a time.Time that may be null. + NullTime = sql.NullTime + // TxOptions holds the transaction options to be used in DB.BeginTx. + TxOptions = sql.TxOptions +) + +// NullScanner represents an sql.Scanner that may be null. +// NullScanner implements the sql.Scanner interface so it can +// be used as a scan destination, similar to the types above. +type NullScanner struct { + S sql.Scanner + Valid bool // Valid is true if the Scan value is not NULL. +} + +// Scan implements the Scanner interface. +func (n *NullScanner) Scan(value interface{}) error { + n.Valid = value != nil + if n.Valid { + return n.S.Scan(value) + } + return nil +} + +// ColumnScanner is the interface that wraps the standard +// sql.Rows methods used for scanning database rows. +type ColumnScanner interface { + Close() error + ColumnTypes() ([]*sql.ColumnType, error) + Columns() ([]string, error) + Err() error + Next() bool + NextResultSet() bool + Scan(dest ...interface{}) error +} diff --git a/vendor/entgo.io/ent/dialect/sql/scan.go b/vendor/entgo.io/ent/dialect/sql/scan.go new file mode 100644 index 00000000..b3e63e9e --- /dev/null +++ b/vendor/entgo.io/ent/dialect/sql/scan.go @@ -0,0 +1,272 @@ +// Copyright 2019-present Facebook Inc. All rights reserved. +// This source code is licensed under the Apache 2.0 license found +// in the LICENSE file in the root directory of this source tree. + +package sql + +import ( + "database/sql" + "database/sql/driver" + "fmt" + "reflect" + "strings" +) + +// ScanOne scans one row to the given value. It fails if the rows holds more than 1 row. +func ScanOne(rows ColumnScanner, v interface{}) error { + columns, err := rows.Columns() + if err != nil { + return fmt.Errorf("sql/scan: failed getting column names: %w", err) + } + if n := len(columns); n != 1 { + return fmt.Errorf("sql/scan: unexpected number of columns: %d", n) + } + if !rows.Next() { + if err := rows.Err(); err != nil { + return err + } + return sql.ErrNoRows + } + if err := rows.Scan(v); err != nil { + return err + } + if rows.Next() { + return fmt.Errorf("sql/scan: expect exactly one row in result set") + } + return rows.Err() +} + +// ScanInt64 scans and returns an int64 from the rows. +func ScanInt64(rows ColumnScanner) (int64, error) { + var n int64 + if err := ScanOne(rows, &n); err != nil { + return 0, err + } + return n, nil +} + +// ScanInt scans and returns an int from the rows. +func ScanInt(rows ColumnScanner) (int, error) { + n, err := ScanInt64(rows) + if err != nil { + return 0, err + } + return int(n), nil +} + +// ScanBool scans and returns a boolean from the rows. +func ScanBool(rows ColumnScanner) (bool, error) { + var b bool + if err := ScanOne(rows, &b); err != nil { + return false, err + } + return b, nil +} + +// ScanString scans and returns a string from the rows. +func ScanString(rows ColumnScanner) (string, error) { + var s string + if err := ScanOne(rows, &s); err != nil { + return "", err + } + return s, nil +} + +// ScanValue scans and returns a driver.Value from the rows. +func ScanValue(rows ColumnScanner) (driver.Value, error) { + var v driver.Value + if err := ScanOne(rows, &v); err != nil { + return "", err + } + return v, nil +} + +// ScanSlice scans the given ColumnScanner (basically, sql.Row or sql.Rows) into the given slice. 
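Putting the driver and the scanning helpers together: a sketch that assumes a registered SQLite driver (here github.com/mattn/go-sqlite3) and an existing `users` table. Note that `Conn.Query` insists on `[]interface{}` args and a `*sql.Rows` destination, per the type switches above:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"entgo.io/ent/dialect"
	"entgo.io/ent/dialect/sql"
	_ "github.com/mattn/go-sqlite3" // an assumed driver choice; any registered driver works.
)

func main() {
	drv, err := sql.Open(dialect.SQLite, "file:ent?mode=memory&cache=shared&_fk=1")
	if err != nil {
		log.Fatal(err)
	}
	defer drv.Close()

	var rows sql.Rows
	// Conn.Query requires args as []interface{} and v as *sql.Rows.
	if err := drv.Query(context.Background(), "SELECT COUNT(*) FROM users", []interface{}{}, &rows); err != nil {
		log.Fatal(err)
	}
	defer rows.Close()

	n, err := sql.ScanInt(&rows)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(n)
}
```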
+func ScanSlice(rows ColumnScanner, v interface{}) error { + columns, err := rows.Columns() + if err != nil { + return fmt.Errorf("sql/scan: failed getting column names: %w", err) + } + rv := reflect.ValueOf(v) + switch { + case rv.Kind() != reflect.Ptr: + if t := reflect.TypeOf(v); t != nil { + return fmt.Errorf("sql/scan: ScanSlice(non-pointer %s)", t) + } + fallthrough + case rv.IsNil(): + return fmt.Errorf("sql/scan: ScanSlice(nil)") + } + rv = reflect.Indirect(rv) + if k := rv.Kind(); k != reflect.Slice { + return fmt.Errorf("sql/scan: invalid type %s. expected slice as an argument", k) + } + scan, err := scanType(rv.Type().Elem(), columns) + if err != nil { + return err + } + if n, m := len(columns), len(scan.columns); n > m { + return fmt.Errorf("sql/scan: columns do not match (%d > %d)", n, m) + } + for rows.Next() { + values := scan.values() + if err := rows.Scan(values...); err != nil { + return fmt.Errorf("sql/scan: failed scanning rows: %w", err) + } + vv := reflect.Append(rv, scan.value(values...)) + rv.Set(vv) + } + return rows.Err() +} + +// rowScan is the configuration for scanning one sql.Row. +type rowScan struct { + // column types of a row. + columns []reflect.Type + // value functions that converts the row columns (result) to a reflect.Value. + value func(v ...interface{}) reflect.Value +} + +// values returns a []interface{} from the configured column types. +func (r *rowScan) values() []interface{} { + values := make([]interface{}, len(r.columns)) + for i := range r.columns { + values[i] = reflect.New(r.columns[i]).Interface() + } + return values +} + +// scanType returns rowScan for the given reflect.Type. +func scanType(typ reflect.Type, columns []string) (*rowScan, error) { + switch k := typ.Kind(); { + case assignable(typ): + return &rowScan{ + columns: []reflect.Type{typ}, + value: func(v ...interface{}) reflect.Value { + return reflect.Indirect(reflect.ValueOf(v[0])) + }, + }, nil + case k == reflect.Ptr: + return scanPtr(typ, columns) + case k == reflect.Struct: + return scanStruct(typ, columns) + default: + return nil, fmt.Errorf("sql/scan: unsupported type ([]%s)", k) + } +} + +var scannerType = reflect.TypeOf((*sql.Scanner)(nil)).Elem() + +// assignable reports if the given type can be assigned directly by `Rows.Scan`. +func assignable(typ reflect.Type) bool { + switch k := typ.Kind(); { + case typ.Implements(scannerType): + case k == reflect.Interface && typ.NumMethod() == 0: + case k == reflect.String || k >= reflect.Bool && k <= reflect.Float64: + case (k == reflect.Slice || k == reflect.Array) && typ.Elem().Kind() == reflect.Uint8: + default: + return false + } + return true +} + +// scanStruct returns the a configuration for scanning an sql.Row into a struct. +func scanStruct(typ reflect.Type, columns []string) (*rowScan, error) { + var ( + scan = &rowScan{} + idxs = make([][]int, 0, typ.NumField()) + names = make(map[string][]int, typ.NumField()) + ) + for i := 0; i < typ.NumField(); i++ { + f := typ.Field(i) + // Skip unexported fields. + if f.PkgPath != "" { + continue + } + // Support 1-level embedding to accepts types as `type T struct {ent.T; V int}`. + if typ := f.Type; f.Anonymous && typ.Kind() == reflect.Struct { + for j := 0; j < typ.NumField(); j++ { + names[columnName(typ.Field(j))] = []int{i, j} + } + continue + } + names[columnName(f)] = []int{i} + } + for _, c := range columns { + // Normalize columns if necessary, for example: COUNT(*) => count. 
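`ScanSlice` pairs naturally with struct destinations. A sketch showing the tag lookup used by the scanner (the `sql` struct tag first, then the `json` tag, then the lowercased field name); every selected column must map to some field, and pointer fields tolerate NULLs:

```go
package example

import "entgo.io/ent/dialect/sql"

// User maps columns to fields: `id` via the sql tag, `name` via the
// json tag, and `age` via the lowercased field name.
type User struct {
	ID   int    `sql:"id"`
	Name string `json:"name,omitempty"`
	Age  *int   // pointer field: a NULL column leaves it nil.
}

// ScanUsers drains rows into a []User using ScanSlice.
func ScanUsers(rows *sql.Rows) ([]User, error) {
	var users []User
	if err := sql.ScanSlice(rows, &users); err != nil {
		return nil, err
	}
	return users, nil
}
```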
+ name := strings.ToLower(strings.Split(c, "(")[0]) + idx, ok := names[name] + if !ok { + return nil, fmt.Errorf("sql/scan: missing struct field for column: %s (%s)", c, name) + } + idxs = append(idxs, idx) + rtype := typ.Field(idx[0]).Type + if len(idx) > 1 { + rtype = rtype.Field(idx[1]).Type + } + if !nillable(rtype) { + // Create a pointer to the actual reflect + // types to accept optional struct fields. + rtype = reflect.PtrTo(rtype) + } + scan.columns = append(scan.columns, rtype) + } + scan.value = func(vs ...interface{}) reflect.Value { + st := reflect.New(typ).Elem() + for i, v := range vs { + rv := reflect.Indirect(reflect.ValueOf(v)) + if rv.IsNil() { + continue + } + idx := idxs[i] + rvalue := st.Field(idx[0]) + if len(idx) > 1 { + rvalue = rvalue.Field(idx[1]) + } + if !nillable(rvalue.Type()) { + rv = reflect.Indirect(rv) + } + rvalue.Set(rv) + } + return st + } + return scan, nil +} + +// columnName returns the column name of a struct-field. +func columnName(f reflect.StructField) string { + name := strings.ToLower(f.Name) + if tag, ok := f.Tag.Lookup("sql"); ok { + name = tag + } else if tag, ok := f.Tag.Lookup("json"); ok { + name = strings.Split(tag, ",")[0] + } + return name +} + +// nillable reports if the reflect-type can have nil value. +func nillable(t reflect.Type) bool { + switch t.Kind() { + case reflect.Interface, reflect.Slice, reflect.Map, reflect.Ptr, reflect.UnsafePointer: + return true + } + return false +} + +// scanPtr wraps the underlying type with rowScan. +func scanPtr(typ reflect.Type, columns []string) (*rowScan, error) { + typ = typ.Elem() + scan, err := scanType(typ, columns) + if err != nil { + return nil, err + } + wrap := scan.value + scan.value = func(vs ...interface{}) reflect.Value { + v := wrap(vs...) + pt := reflect.PtrTo(v.Type()) + pv := reflect.New(pt.Elem()) + pv.Elem().Set(v) + return pv + } + return scan, nil +} diff --git a/vendor/github.com/PuerkitoBio/purell/.gitignore b/vendor/github.com/PuerkitoBio/purell/.gitignore deleted file mode 100644 index 748e4c80..00000000 --- a/vendor/github.com/PuerkitoBio/purell/.gitignore +++ /dev/null @@ -1,5 +0,0 @@ -*.sublime-* -.DS_Store -*.swp -*.swo -tags diff --git a/vendor/github.com/PuerkitoBio/purell/.travis.yml b/vendor/github.com/PuerkitoBio/purell/.travis.yml deleted file mode 100644 index cf31e6af..00000000 --- a/vendor/github.com/PuerkitoBio/purell/.travis.yml +++ /dev/null @@ -1,12 +0,0 @@ -language: go - -go: - - 1.4.x - - 1.5.x - - 1.6.x - - 1.7.x - - 1.8.x - - 1.9.x - - "1.10.x" - - "1.11.x" - - tip diff --git a/vendor/github.com/PuerkitoBio/purell/LICENSE b/vendor/github.com/PuerkitoBio/purell/LICENSE deleted file mode 100644 index 4b9986de..00000000 --- a/vendor/github.com/PuerkitoBio/purell/LICENSE +++ /dev/null @@ -1,12 +0,0 @@ -Copyright (c) 2012, Martin Angers -All rights reserved. - -Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - -* Neither the name of the author nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/PuerkitoBio/purell/README.md b/vendor/github.com/PuerkitoBio/purell/README.md deleted file mode 100644 index 07de0c49..00000000 --- a/vendor/github.com/PuerkitoBio/purell/README.md +++ /dev/null @@ -1,188 +0,0 @@ -# Purell - -Purell is a tiny Go library to normalize URLs. It returns a pure URL. Pure-ell. Sanitizer and all. Yeah, I know... - -Based on the [wikipedia paper][wiki] and the [RFC 3986 document][rfc]. - -[![build status](https://travis-ci.org/PuerkitoBio/purell.svg?branch=master)](http://travis-ci.org/PuerkitoBio/purell) - -## Install - -`go get github.com/PuerkitoBio/purell` - -## Changelog - -* **v1.1.1** : Fix failing test due to Go1.12 changes (thanks to @ianlancetaylor). -* **2016-11-14 (v1.1.0)** : IDN: Conform to RFC 5895: Fold character width (thanks to @beeker1121). -* **2016-07-27 (v1.0.0)** : Normalize IDN to ASCII (thanks to @zenovich). -* **2015-02-08** : Add fix for relative paths issue ([PR #5][pr5]) and add fix for unnecessary encoding of reserved characters ([see issue #7][iss7]). -* **v0.2.0** : Add benchmarks, Attempt IDN support. -* **v0.1.0** : Initial release. - -## Examples - -From `example_test.go` (note that in your code, you would import "github.com/PuerkitoBio/purell", and would prefix references to its methods and constants with "purell."): - -```go -package purell - -import ( - "fmt" - "net/url" -) - -func ExampleNormalizeURLString() { - if normalized, err := NormalizeURLString("hTTp://someWEBsite.com:80/Amazing%3f/url/", - FlagLowercaseScheme|FlagLowercaseHost|FlagUppercaseEscapes); err != nil { - panic(err) - } else { - fmt.Print(normalized) - } - // Output: http://somewebsite.com:80/Amazing%3F/url/ -} - -func ExampleMustNormalizeURLString() { - normalized := MustNormalizeURLString("hTTpS://someWEBsite.com:443/Amazing%fa/url/", - FlagsUnsafeGreedy) - fmt.Print(normalized) - - // Output: http://somewebsite.com/Amazing%FA/url -} - -func ExampleNormalizeURL() { - if u, err := url.Parse("Http://SomeUrl.com:8080/a/b/.././c///g?c=3&a=1&b=9&c=0#target"); err != nil { - panic(err) - } else { - normalized := NormalizeURL(u, FlagsUsuallySafeGreedy|FlagRemoveDuplicateSlashes|FlagRemoveFragment) - fmt.Print(normalized) - } - - // Output: http://someurl.com:8080/a/c/g?c=3&a=1&b=9&c=0 -} -``` - -## API - -As seen in the examples above, purell offers three methods, `NormalizeURLString(string, NormalizationFlags) (string, error)`, `MustNormalizeURLString(string, NormalizationFlags) (string)` and `NormalizeURL(*url.URL, NormalizationFlags) (string)`. They all normalize the provided URL based on the specified flags. 
Here are the available flags: - -```go -const ( - // Safe normalizations - FlagLowercaseScheme NormalizationFlags = 1 << iota // HTTP://host -> http://host, applied by default in Go1.1 - FlagLowercaseHost // http://HOST -> http://host - FlagUppercaseEscapes // http://host/t%ef -> http://host/t%EF - FlagDecodeUnnecessaryEscapes // http://host/t%41 -> http://host/tA - FlagEncodeNecessaryEscapes // http://host/!"#$ -> http://host/%21%22#$ - FlagRemoveDefaultPort // http://host:80 -> http://host - FlagRemoveEmptyQuerySeparator // http://host/path? -> http://host/path - - // Usually safe normalizations - FlagRemoveTrailingSlash // http://host/path/ -> http://host/path - FlagAddTrailingSlash // http://host/path -> http://host/path/ (should choose only one of these add/remove trailing slash flags) - FlagRemoveDotSegments // http://host/path/./a/b/../c -> http://host/path/a/c - - // Unsafe normalizations - FlagRemoveDirectoryIndex // http://host/path/index.html -> http://host/path/ - FlagRemoveFragment // http://host/path#fragment -> http://host/path - FlagForceHTTP // https://host -> http://host - FlagRemoveDuplicateSlashes // http://host/path//a///b -> http://host/path/a/b - FlagRemoveWWW // http://www.host/ -> http://host/ - FlagAddWWW // http://host/ -> http://www.host/ (should choose only one of these add/remove WWW flags) - FlagSortQuery // http://host/path?c=3&b=2&a=1&b=1 -> http://host/path?a=1&b=1&b=2&c=3 - - // Normalizations not in the wikipedia article, required to cover tests cases - // submitted by jehiah - FlagDecodeDWORDHost // http://1113982867 -> http://66.102.7.147 - FlagDecodeOctalHost // http://0102.0146.07.0223 -> http://66.102.7.147 - FlagDecodeHexHost // http://0x42660793 -> http://66.102.7.147 - FlagRemoveUnnecessaryHostDots // http://.host../path -> http://host/path - FlagRemoveEmptyPortSeparator // http://host:/path -> http://host/path - - // Convenience set of safe normalizations - FlagsSafe NormalizationFlags = FlagLowercaseHost | FlagLowercaseScheme | FlagUppercaseEscapes | FlagDecodeUnnecessaryEscapes | FlagEncodeNecessaryEscapes | FlagRemoveDefaultPort | FlagRemoveEmptyQuerySeparator - - // For convenience sets, "greedy" uses the "remove trailing slash" and "remove www. prefix" flags, - // while "non-greedy" uses the "add (or keep) the trailing slash" and "add www. prefix". 
- - // Convenience set of usually safe normalizations (includes FlagsSafe) - FlagsUsuallySafeGreedy NormalizationFlags = FlagsSafe | FlagRemoveTrailingSlash | FlagRemoveDotSegments - FlagsUsuallySafeNonGreedy NormalizationFlags = FlagsSafe | FlagAddTrailingSlash | FlagRemoveDotSegments - - // Convenience set of unsafe normalizations (includes FlagsUsuallySafe) - FlagsUnsafeGreedy NormalizationFlags = FlagsUsuallySafeGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagRemoveWWW | FlagSortQuery - FlagsUnsafeNonGreedy NormalizationFlags = FlagsUsuallySafeNonGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagAddWWW | FlagSortQuery - - // Convenience set of all available flags - FlagsAllGreedy = FlagsUnsafeGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator - FlagsAllNonGreedy = FlagsUnsafeNonGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator -) -``` - -For convenience, the set of flags `FlagsSafe`, `FlagsUsuallySafe[Greedy|NonGreedy]`, `FlagsUnsafe[Greedy|NonGreedy]` and `FlagsAll[Greedy|NonGreedy]` are provided for the similarly grouped normalizations on [wikipedia's URL normalization page][wiki]. You can add (using the bitwise OR `|` operator) or remove (using the bitwise AND NOT `&^` operator) individual flags from the sets if required, to build your own custom set. - -The [full godoc reference is available on gopkgdoc][godoc]. - -Some things to note: - -* `FlagDecodeUnnecessaryEscapes`, `FlagEncodeNecessaryEscapes`, `FlagUppercaseEscapes` and `FlagRemoveEmptyQuerySeparator` are always implicitly set, because internally, the URL string is parsed as an URL object, which automatically decodes unnecessary escapes, uppercases and encodes necessary ones, and removes empty query separators (an unnecessary `?` at the end of the url). So this operation cannot **not** be done. For this reason, `FlagRemoveEmptyQuerySeparator` (as well as the other three) has been included in the `FlagsSafe` convenience set, instead of `FlagsUnsafe`, where Wikipedia puts it. - -* The `FlagDecodeUnnecessaryEscapes` decodes the following escapes (*from -> to*): - - %24 -> $ - - %26 -> & - - %2B-%3B -> +,-./0123456789:; - - %3D -> = - - %40-%5A -> @ABCDEFGHIJKLMNOPQRSTUVWXYZ - - %5F -> _ - - %61-%7A -> abcdefghijklmnopqrstuvwxyz - - %7E -> ~ - - -* When the `NormalizeURL` function is used (passing an URL object), this source URL object is modified (that is, after the call, the URL object will be modified to reflect the normalization). - -* The *replace IP with domain name* normalization (`http://208.77.188.166/ → http://www.example.com/`) is obviously not possible for a library without making some network requests. This is not implemented in purell. - -* The *remove unused query string parameters* and *remove default query parameters* are also not implemented, since this is a very case-specific normalization, and it is quite trivial to do with an URL object. - -### Safe vs Usually Safe vs Unsafe - -Purell allows you to control the level of risk you take while normalizing an URL. You can aggressively normalize, play it totally safe, or anything in between. 
- -Consider the following URL: - -`HTTPS://www.RooT.com/toto/t%45%1f///a/./b/../c/?z=3&w=2&a=4&w=1#invalid` - -Normalizing with the `FlagsSafe` gives: - -`https://www.root.com/toto/tE%1F///a/./b/../c/?z=3&w=2&a=4&w=1#invalid` - -With the `FlagsUsuallySafeGreedy`: - -`https://www.root.com/toto/tE%1F///a/c?z=3&w=2&a=4&w=1#invalid` - -And with `FlagsUnsafeGreedy`: - -`http://root.com/toto/tE%1F/a/c?a=4&w=1&w=2&z=3` - -## TODOs - -* Add a class/default instance to allow specifying custom directory index names? At the moment, removing directory index removes `(^|/)((?:default|index)\.\w{1,4})$`. - -## Thanks / Contributions - -@rogpeppe -@jehiah -@opennota -@pchristopher1275 -@zenovich -@beeker1121 - -## License - -The [BSD 3-Clause license][bsd]. - -[bsd]: http://opensource.org/licenses/BSD-3-Clause -[wiki]: http://en.wikipedia.org/wiki/URL_normalization -[rfc]: http://tools.ietf.org/html/rfc3986#section-6 -[godoc]: http://go.pkgdoc.org/github.com/PuerkitoBio/purell -[pr5]: https://github.com/PuerkitoBio/purell/pull/5 -[iss7]: https://github.com/PuerkitoBio/purell/issues/7 diff --git a/vendor/github.com/PuerkitoBio/purell/purell.go b/vendor/github.com/PuerkitoBio/purell/purell.go deleted file mode 100644 index 6d0fc190..00000000 --- a/vendor/github.com/PuerkitoBio/purell/purell.go +++ /dev/null @@ -1,379 +0,0 @@ -/* -Package purell offers URL normalization as described on the wikipedia page: -http://en.wikipedia.org/wiki/URL_normalization -*/ -package purell - -import ( - "bytes" - "fmt" - "net/url" - "regexp" - "sort" - "strconv" - "strings" - - "github.com/PuerkitoBio/urlesc" - "golang.org/x/net/idna" - "golang.org/x/text/unicode/norm" - "golang.org/x/text/width" -) - -// A set of normalization flags determines how a URL will -// be normalized. -type NormalizationFlags uint - -const ( - // Safe normalizations - FlagLowercaseScheme NormalizationFlags = 1 << iota // HTTP://host -> http://host, applied by default in Go1.1 - FlagLowercaseHost // http://HOST -> http://host - FlagUppercaseEscapes // http://host/t%ef -> http://host/t%EF - FlagDecodeUnnecessaryEscapes // http://host/t%41 -> http://host/tA - FlagEncodeNecessaryEscapes // http://host/!"#$ -> http://host/%21%22#$ - FlagRemoveDefaultPort // http://host:80 -> http://host - FlagRemoveEmptyQuerySeparator // http://host/path? 
-> http://host/path - - // Usually safe normalizations - FlagRemoveTrailingSlash // http://host/path/ -> http://host/path - FlagAddTrailingSlash // http://host/path -> http://host/path/ (should choose only one of these add/remove trailing slash flags) - FlagRemoveDotSegments // http://host/path/./a/b/../c -> http://host/path/a/c - - // Unsafe normalizations - FlagRemoveDirectoryIndex // http://host/path/index.html -> http://host/path/ - FlagRemoveFragment // http://host/path#fragment -> http://host/path - FlagForceHTTP // https://host -> http://host - FlagRemoveDuplicateSlashes // http://host/path//a///b -> http://host/path/a/b - FlagRemoveWWW // http://www.host/ -> http://host/ - FlagAddWWW // http://host/ -> http://www.host/ (should choose only one of these add/remove WWW flags) - FlagSortQuery // http://host/path?c=3&b=2&a=1&b=1 -> http://host/path?a=1&b=1&b=2&c=3 - - // Normalizations not in the wikipedia article, required to cover tests cases - // submitted by jehiah - FlagDecodeDWORDHost // http://1113982867 -> http://66.102.7.147 - FlagDecodeOctalHost // http://0102.0146.07.0223 -> http://66.102.7.147 - FlagDecodeHexHost // http://0x42660793 -> http://66.102.7.147 - FlagRemoveUnnecessaryHostDots // http://.host../path -> http://host/path - FlagRemoveEmptyPortSeparator // http://host:/path -> http://host/path - - // Convenience set of safe normalizations - FlagsSafe NormalizationFlags = FlagLowercaseHost | FlagLowercaseScheme | FlagUppercaseEscapes | FlagDecodeUnnecessaryEscapes | FlagEncodeNecessaryEscapes | FlagRemoveDefaultPort | FlagRemoveEmptyQuerySeparator - - // For convenience sets, "greedy" uses the "remove trailing slash" and "remove www. prefix" flags, - // while "non-greedy" uses the "add (or keep) the trailing slash" and "add www. prefix". 
- - // Convenience set of usually safe normalizations (includes FlagsSafe) - FlagsUsuallySafeGreedy NormalizationFlags = FlagsSafe | FlagRemoveTrailingSlash | FlagRemoveDotSegments - FlagsUsuallySafeNonGreedy NormalizationFlags = FlagsSafe | FlagAddTrailingSlash | FlagRemoveDotSegments - - // Convenience set of unsafe normalizations (includes FlagsUsuallySafe) - FlagsUnsafeGreedy NormalizationFlags = FlagsUsuallySafeGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagRemoveWWW | FlagSortQuery - FlagsUnsafeNonGreedy NormalizationFlags = FlagsUsuallySafeNonGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagAddWWW | FlagSortQuery - - // Convenience set of all available flags - FlagsAllGreedy = FlagsUnsafeGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator - FlagsAllNonGreedy = FlagsUnsafeNonGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator -) - -const ( - defaultHttpPort = ":80" - defaultHttpsPort = ":443" -) - -// Regular expressions used by the normalizations -var rxPort = regexp.MustCompile(`(:\d+)/?$`) -var rxDirIndex = regexp.MustCompile(`(^|/)((?:default|index)\.\w{1,4})$`) -var rxDupSlashes = regexp.MustCompile(`/{2,}`) -var rxDWORDHost = regexp.MustCompile(`^(\d+)((?:\.+)?(?:\:\d*)?)$`) -var rxOctalHost = regexp.MustCompile(`^(0\d*)\.(0\d*)\.(0\d*)\.(0\d*)((?:\.+)?(?:\:\d*)?)$`) -var rxHexHost = regexp.MustCompile(`^0x([0-9A-Fa-f]+)((?:\.+)?(?:\:\d*)?)$`) -var rxHostDots = regexp.MustCompile(`^(.+?)(:\d+)?$`) -var rxEmptyPort = regexp.MustCompile(`:+$`) - -// Map of flags to implementation function. -// FlagDecodeUnnecessaryEscapes has no action, since it is done automatically -// by parsing the string as an URL. Same for FlagUppercaseEscapes and FlagRemoveEmptyQuerySeparator. - -// Since maps have undefined traversing order, make a slice of ordered keys -var flagsOrder = []NormalizationFlags{ - FlagLowercaseScheme, - FlagLowercaseHost, - FlagRemoveDefaultPort, - FlagRemoveDirectoryIndex, - FlagRemoveDotSegments, - FlagRemoveFragment, - FlagForceHTTP, // Must be after remove default port (because https=443/http=80) - FlagRemoveDuplicateSlashes, - FlagRemoveWWW, - FlagAddWWW, - FlagSortQuery, - FlagDecodeDWORDHost, - FlagDecodeOctalHost, - FlagDecodeHexHost, - FlagRemoveUnnecessaryHostDots, - FlagRemoveEmptyPortSeparator, - FlagRemoveTrailingSlash, // These two (add/remove trailing slash) must be last - FlagAddTrailingSlash, -} - -// ... 
and then the map, where order is unimportant -var flags = map[NormalizationFlags]func(*url.URL){ - FlagLowercaseScheme: lowercaseScheme, - FlagLowercaseHost: lowercaseHost, - FlagRemoveDefaultPort: removeDefaultPort, - FlagRemoveDirectoryIndex: removeDirectoryIndex, - FlagRemoveDotSegments: removeDotSegments, - FlagRemoveFragment: removeFragment, - FlagForceHTTP: forceHTTP, - FlagRemoveDuplicateSlashes: removeDuplicateSlashes, - FlagRemoveWWW: removeWWW, - FlagAddWWW: addWWW, - FlagSortQuery: sortQuery, - FlagDecodeDWORDHost: decodeDWORDHost, - FlagDecodeOctalHost: decodeOctalHost, - FlagDecodeHexHost: decodeHexHost, - FlagRemoveUnnecessaryHostDots: removeUnncessaryHostDots, - FlagRemoveEmptyPortSeparator: removeEmptyPortSeparator, - FlagRemoveTrailingSlash: removeTrailingSlash, - FlagAddTrailingSlash: addTrailingSlash, -} - -// MustNormalizeURLString returns the normalized string, and panics if an error occurs. -// It takes an URL string as input, as well as the normalization flags. -func MustNormalizeURLString(u string, f NormalizationFlags) string { - result, e := NormalizeURLString(u, f) - if e != nil { - panic(e) - } - return result -} - -// NormalizeURLString returns the normalized string, or an error if it can't be parsed into an URL object. -// It takes an URL string as input, as well as the normalization flags. -func NormalizeURLString(u string, f NormalizationFlags) (string, error) { - parsed, err := url.Parse(u) - if err != nil { - return "", err - } - - if f&FlagLowercaseHost == FlagLowercaseHost { - parsed.Host = strings.ToLower(parsed.Host) - } - - // The idna package doesn't fully conform to RFC 5895 - // (https://tools.ietf.org/html/rfc5895), so we do it here. - // Taken from Go 1.8 cycle source, courtesy of bradfitz. - // TODO: Remove when (if?) idna package conforms to RFC 5895. - parsed.Host = width.Fold.String(parsed.Host) - parsed.Host = norm.NFC.String(parsed.Host) - if parsed.Host, err = idna.ToASCII(parsed.Host); err != nil { - return "", err - } - - return NormalizeURL(parsed, f), nil -} - -// NormalizeURL returns the normalized string. -// It takes a parsed URL object as input, as well as the normalization flags. -func NormalizeURL(u *url.URL, f NormalizationFlags) string { - for _, k := range flagsOrder { - if f&k == k { - flags[k](u) - } - } - return urlesc.Escape(u) -} - -func lowercaseScheme(u *url.URL) { - if len(u.Scheme) > 0 { - u.Scheme = strings.ToLower(u.Scheme) - } -} - -func lowercaseHost(u *url.URL) { - if len(u.Host) > 0 { - u.Host = strings.ToLower(u.Host) - } -} - -func removeDefaultPort(u *url.URL) { - if len(u.Host) > 0 { - scheme := strings.ToLower(u.Scheme) - u.Host = rxPort.ReplaceAllStringFunc(u.Host, func(val string) string { - if (scheme == "http" && val == defaultHttpPort) || (scheme == "https" && val == defaultHttpsPort) { - return "" - } - return val - }) - } -} - -func removeTrailingSlash(u *url.URL) { - if l := len(u.Path); l > 0 { - if strings.HasSuffix(u.Path, "/") { - u.Path = u.Path[:l-1] - } - } else if l = len(u.Host); l > 0 { - if strings.HasSuffix(u.Host, "/") { - u.Host = u.Host[:l-1] - } - } -} - -func addTrailingSlash(u *url.URL) { - if l := len(u.Path); l > 0 { - if !strings.HasSuffix(u.Path, "/") { - u.Path += "/" - } - } else if l = len(u.Host); l > 0 { - if !strings.HasSuffix(u.Host, "/") { - u.Host += "/" - } - } -} - -func removeDotSegments(u *url.URL) { - if len(u.Path) > 0 { - var dotFree []string - var lastIsDot bool - - sections := strings.Split(u.Path, "/") - for _, s := range sections { - if s == ".." 
{ - if len(dotFree) > 0 { - dotFree = dotFree[:len(dotFree)-1] - } - } else if s != "." { - dotFree = append(dotFree, s) - } - lastIsDot = (s == "." || s == "..") - } - // Special case if host does not end with / and new path does not begin with / - u.Path = strings.Join(dotFree, "/") - if u.Host != "" && !strings.HasSuffix(u.Host, "/") && !strings.HasPrefix(u.Path, "/") { - u.Path = "/" + u.Path - } - // Special case if the last segment was a dot, make sure the path ends with a slash - if lastIsDot && !strings.HasSuffix(u.Path, "/") { - u.Path += "/" - } - } -} - -func removeDirectoryIndex(u *url.URL) { - if len(u.Path) > 0 { - u.Path = rxDirIndex.ReplaceAllString(u.Path, "$1") - } -} - -func removeFragment(u *url.URL) { - u.Fragment = "" -} - -func forceHTTP(u *url.URL) { - if strings.ToLower(u.Scheme) == "https" { - u.Scheme = "http" - } -} - -func removeDuplicateSlashes(u *url.URL) { - if len(u.Path) > 0 { - u.Path = rxDupSlashes.ReplaceAllString(u.Path, "/") - } -} - -func removeWWW(u *url.URL) { - if len(u.Host) > 0 && strings.HasPrefix(strings.ToLower(u.Host), "www.") { - u.Host = u.Host[4:] - } -} - -func addWWW(u *url.URL) { - if len(u.Host) > 0 && !strings.HasPrefix(strings.ToLower(u.Host), "www.") { - u.Host = "www." + u.Host - } -} - -func sortQuery(u *url.URL) { - q := u.Query() - - if len(q) > 0 { - arKeys := make([]string, len(q)) - i := 0 - for k := range q { - arKeys[i] = k - i++ - } - sort.Strings(arKeys) - buf := new(bytes.Buffer) - for _, k := range arKeys { - sort.Strings(q[k]) - for _, v := range q[k] { - if buf.Len() > 0 { - buf.WriteRune('&') - } - buf.WriteString(fmt.Sprintf("%s=%s", k, urlesc.QueryEscape(v))) - } - } - - // Rebuild the raw query string - u.RawQuery = buf.String() - } -} - -func decodeDWORDHost(u *url.URL) { - if len(u.Host) > 0 { - if matches := rxDWORDHost.FindStringSubmatch(u.Host); len(matches) > 2 { - var parts [4]int64 - - dword, _ := strconv.ParseInt(matches[1], 10, 0) - for i, shift := range []uint{24, 16, 8, 0} { - parts[i] = dword >> shift & 0xFF - } - u.Host = fmt.Sprintf("%d.%d.%d.%d%s", parts[0], parts[1], parts[2], parts[3], matches[2]) - } - } -} - -func decodeOctalHost(u *url.URL) { - if len(u.Host) > 0 { - if matches := rxOctalHost.FindStringSubmatch(u.Host); len(matches) > 5 { - var parts [4]int64 - - for i := 1; i <= 4; i++ { - parts[i-1], _ = strconv.ParseInt(matches[i], 8, 0) - } - u.Host = fmt.Sprintf("%d.%d.%d.%d%s", parts[0], parts[1], parts[2], parts[3], matches[5]) - } - } -} - -func decodeHexHost(u *url.URL) { - if len(u.Host) > 0 { - if matches := rxHexHost.FindStringSubmatch(u.Host); len(matches) > 2 { - // Conversion is safe because of regex validation - parsed, _ := strconv.ParseInt(matches[1], 16, 0) - // Set host as DWORD (base 10) encoded host - u.Host = fmt.Sprintf("%d%s", parsed, matches[2]) - // The rest is the same as decoding a DWORD host - decodeDWORDHost(u) - } - } -} - -func removeUnncessaryHostDots(u *url.URL) { - if len(u.Host) > 0 { - if matches := rxHostDots.FindStringSubmatch(u.Host); len(matches) > 1 { - // Trim the leading and trailing dots - u.Host = strings.Trim(matches[1], ".") - if len(matches) > 2 { - u.Host += matches[2] - } - } - } -} - -func removeEmptyPortSeparator(u *url.URL) { - if len(u.Host) > 0 { - u.Host = rxEmptyPort.ReplaceAllString(u.Host, "") - } -} diff --git a/vendor/github.com/PuerkitoBio/urlesc/.travis.yml b/vendor/github.com/PuerkitoBio/urlesc/.travis.yml deleted file mode 100644 index ba6b225f..00000000 --- a/vendor/github.com/PuerkitoBio/urlesc/.travis.yml +++ /dev/null @@ 
-1,15 +0,0 @@ -language: go - -go: - - 1.4.x - - 1.5.x - - 1.6.x - - 1.7.x - - 1.8.x - - tip - -install: - - go build . - -script: - - go test -v diff --git a/vendor/github.com/PuerkitoBio/urlesc/README.md b/vendor/github.com/PuerkitoBio/urlesc/README.md deleted file mode 100644 index 57aff0a5..00000000 --- a/vendor/github.com/PuerkitoBio/urlesc/README.md +++ /dev/null @@ -1,16 +0,0 @@ -urlesc [![Build Status](https://travis-ci.org/PuerkitoBio/urlesc.svg?branch=master)](https://travis-ci.org/PuerkitoBio/urlesc) [![GoDoc](http://godoc.org/github.com/PuerkitoBio/urlesc?status.svg)](http://godoc.org/github.com/PuerkitoBio/urlesc) -====== - -Package urlesc implements query escaping as per RFC 3986. - -It contains some parts of the net/url package, modified so as to allow -some reserved characters incorrectly escaped by net/url (see [issue 5684](https://github.com/golang/go/issues/5684)). - -## Install - - go get github.com/PuerkitoBio/urlesc - -## License - -Go license (BSD-3-Clause) - diff --git a/vendor/github.com/PuerkitoBio/urlesc/urlesc.go b/vendor/github.com/PuerkitoBio/urlesc/urlesc.go deleted file mode 100644 index 1b846245..00000000 --- a/vendor/github.com/PuerkitoBio/urlesc/urlesc.go +++ /dev/null @@ -1,180 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package urlesc implements query escaping as per RFC 3986. -// It contains some parts of the net/url package, modified so as to allow -// some reserved characters incorrectly escaped by net/url. -// See https://github.com/golang/go/issues/5684 -package urlesc - -import ( - "bytes" - "net/url" - "strings" -) - -type encoding int - -const ( - encodePath encoding = 1 + iota - encodeUserPassword - encodeQueryComponent - encodeFragment -) - -// Return true if the specified character should be escaped when -// appearing in a URL string, according to RFC 3986. -func shouldEscape(c byte, mode encoding) bool { - // §2.3 Unreserved characters (alphanum) - if 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' { - return false - } - - switch c { - case '-', '.', '_', '~': // §2.3 Unreserved characters (mark) - return false - - // §2.2 Reserved characters (reserved) - case ':', '/', '?', '#', '[', ']', '@', // gen-delims - '!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=': // sub-delims - // Different sections of the URL allow a few of - // the reserved characters to appear unescaped. - switch mode { - case encodePath: // §3.3 - // The RFC allows sub-delims and : @. - // '/', '[' and ']' can be used to assign meaning to individual path - // segments. This package only manipulates the path as a whole, - // so we allow those as well. That leaves only ? and # to escape. - return c == '?' || c == '#' - - case encodeUserPassword: // §3.2.1 - // The RFC allows : and sub-delims in - // userinfo. The parsing of userinfo treats ':' as special so we must escape - // all the gen-delims. - return c == ':' || c == '/' || c == '?' || c == '#' || c == '[' || c == ']' || c == '@' - - case encodeQueryComponent: // §3.4 - // The RFC allows / and ?. - return c != '/' && c != '?' - - case encodeFragment: // §4.1 - // The RFC text is silent but the grammar allows - // everything, so escape nothing but # - return c == '#' - } - } - - // Everything else must be escaped. - return true -} - -// QueryEscape escapes the string so it can be safely placed -// inside a URL query. 
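For reference, the purell normalization API removed above was driven through its exported flag sets. A minimal sketch of typical usage against the released purell package (the flag names and the NormalizeURLString signature appear verbatim in the deleted code; the input URL and the expected output in the comment are illustrative):

```go
package main

import (
	"fmt"

	"github.com/PuerkitoBio/purell"
)

func main() {
	// FlagsUsuallySafeGreedy lowercases scheme and host, strips the
	// default port, resolves dot segments and removes the trailing
	// slash, per the flag sets defined in the deleted source above.
	out, err := purell.NormalizeURLString(
		"HTTP://www.Example.COM:80/a/b/../c/",
		purell.FlagsUsuallySafeGreedy|purell.FlagSortQuery,
	)
	if err != nil {
		// the input could not be parsed by net/url
		panic(err)
	}
	fmt.Println(out) // expected: http://www.example.com/a/c
}
```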
-func QueryEscape(s string) string { - return escape(s, encodeQueryComponent) -} - -func escape(s string, mode encoding) string { - spaceCount, hexCount := 0, 0 - for i := 0; i < len(s); i++ { - c := s[i] - if shouldEscape(c, mode) { - if c == ' ' && mode == encodeQueryComponent { - spaceCount++ - } else { - hexCount++ - } - } - } - - if spaceCount == 0 && hexCount == 0 { - return s - } - - t := make([]byte, len(s)+2*hexCount) - j := 0 - for i := 0; i < len(s); i++ { - switch c := s[i]; { - case c == ' ' && mode == encodeQueryComponent: - t[j] = '+' - j++ - case shouldEscape(c, mode): - t[j] = '%' - t[j+1] = "0123456789ABCDEF"[c>>4] - t[j+2] = "0123456789ABCDEF"[c&15] - j += 3 - default: - t[j] = s[i] - j++ - } - } - return string(t) -} - -var uiReplacer = strings.NewReplacer( - "%21", "!", - "%27", "'", - "%28", "(", - "%29", ")", - "%2A", "*", -) - -// unescapeUserinfo unescapes some characters that need not to be escaped as per RFC3986. -func unescapeUserinfo(s string) string { - return uiReplacer.Replace(s) -} - -// Escape reassembles the URL into a valid URL string. -// The general form of the result is one of: -// -// scheme:opaque -// scheme://userinfo@host/path?query#fragment -// -// If u.Opaque is non-empty, String uses the first form; -// otherwise it uses the second form. -// -// In the second form, the following rules apply: -// - if u.Scheme is empty, scheme: is omitted. -// - if u.User is nil, userinfo@ is omitted. -// - if u.Host is empty, host/ is omitted. -// - if u.Scheme and u.Host are empty and u.User is nil, -// the entire scheme://userinfo@host/ is omitted. -// - if u.Host is non-empty and u.Path begins with a /, -// the form host/path does not add its own /. -// - if u.RawQuery is empty, ?query is omitted. -// - if u.Fragment is empty, #fragment is omitted. 
-func Escape(u *url.URL) string { - var buf bytes.Buffer - if u.Scheme != "" { - buf.WriteString(u.Scheme) - buf.WriteByte(':') - } - if u.Opaque != "" { - buf.WriteString(u.Opaque) - } else { - if u.Scheme != "" || u.Host != "" || u.User != nil { - buf.WriteString("//") - if ui := u.User; ui != nil { - buf.WriteString(unescapeUserinfo(ui.String())) - buf.WriteByte('@') - } - if h := u.Host; h != "" { - buf.WriteString(h) - } - } - if u.Path != "" && u.Path[0] != '/' && u.Host != "" { - buf.WriteByte('/') - } - buf.WriteString(escape(u.Path, encodePath)) - } - if u.RawQuery != "" { - buf.WriteByte('?') - buf.WriteString(u.RawQuery) - } - if u.Fragment != "" { - buf.WriteByte('#') - buf.WriteString(escape(u.Fragment, encodeFragment)) - } - return buf.String() -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go index a5b051ff..82c6cc48 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -6191,6 +6191,87 @@ var awsPartition = partition{ }: endpoint{}, }, }, + "devops-guru": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "devops-guru-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "devops-guru-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "devops-guru-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "devops-guru-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "devops-guru-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "devops-guru-fips.us-west-2.amazonaws.com", + }, + }, + }, "directconnect": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -7518,6 +7599,28 @@ var awsPartition = partition{ }, }, }, + "edge.sagemaker": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, "eks": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{ @@ -10928,7 +11031,23 @@ var awsPartition = partition{ }, }, "health": service{ + 
PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + SSLCommonName: "health.us-east-1.amazonaws.com", + Protocols: []string{"https"}, + }, + }, Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-global", + }: endpoint{ + Hostname: "global.health.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, endpointKey{ Region: "fips-us-east-2", }: endpoint{ @@ -21058,6 +21177,67 @@ var awsPartition = partition{ }: endpoint{}, }, }, + "sso": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, "states": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -24985,13 +25165,23 @@ var awscnPartition = partition{ }, }, "health": service{ + PartitionEndpoint: "aws-cn-global", + IsRegionalized: boxedFalse, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + SSLCommonName: "health.cn-northwest-1.amazonaws.com.cn", + Protocols: []string{"https"}, + }, + }, Endpoints: serviceEndpoints{ endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, + Region: "aws-cn-global", + }: endpoint{ + Hostname: "global.health.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, }, }, "iam": service{ @@ -30192,6 +30382,26 @@ var awsusgovPartition = partition{ }, }, }, + "sso": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "sso.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "sso.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, "states": service{ Endpoints: serviceEndpoints{ endpointKey{ diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index 504aded0..deb7af40 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.44.54" +const SDKVersion = "1.44.57" diff --git a/vendor/github.com/baidubce/bce-sdk-go/bce/config.go b/vendor/github.com/baidubce/bce-sdk-go/bce/config.go index ca3bb9de..6632a643 100644 --- a/vendor/github.com/baidubce/bce-sdk-go/bce/config.go +++ 
b/vendor/github.com/baidubce/bce-sdk-go/bce/config.go @@ -26,7 +26,7 @@ import ( // Constants and default values for the package bce const ( - SDK_VERSION = "0.9.129" + SDK_VERSION = "0.9.130" URI_PREFIX = "/" // now support uri without prefix "v1" so just set root path DEFAULT_DOMAIN = "baidubce.com" DEFAULT_PROTOCOL = "http" diff --git a/vendor/github.com/daodao97/fly/README.md b/vendor/github.com/daodao97/fly/README.md index 2301199a..017b21da 100644 --- a/vendor/github.com/daodao97/fly/README.md +++ b/vendor/github.com/daodao97/fly/README.md @@ -4,7 +4,7 @@ A simple go db library - data hook, Easy transition db data - sql builder, Not need handwritten SQL -- hasOne/hasMany, Convenient access to associated data +- hasOne/hasMany, Convenient get linked data - validator, Flexible verification policies - extensible, Easy extend custom hook/sql/validator - cacheEnable, Support for custom cache implementations @@ -71,13 +71,13 @@ func main() { } type User struct { - ID int64 `json:"id"` - Name string `json:"name"` - Status int64 `json:"status"` - Profile *Profile `json:"profile"` - IsDeleted int `json:"is_deleted"` - RoleIds []int `json:"role_ids"` - Score int `json:"score"` + ID int64 `db:"id"` + Name string `db:"name"` + Status int64 `db:"status"` + Profile *Profile `db:"profile"` + IsDeleted int `db:"is_deleted"` + RoleIds []int `db:"role_ids"` + Score int `db:"score"` } type Profile struct { diff --git a/vendor/github.com/daodao97/fly/model.go b/vendor/github.com/daodao97/fly/model.go index 290c52fa..4fa43c1c 100644 --- a/vendor/github.com/daodao97/fly/model.go +++ b/vendor/github.com/daodao97/fly/model.go @@ -28,12 +28,13 @@ type model struct { fakeDelKey string primaryKey string columnHook map[string]HookData - columnValidator []Validator + columnValidator []Valid hasOne []HasOpts hasMany []HasOpts options *Options client *sql.DB saveZero bool + enableValidator bool err error } @@ -56,6 +57,7 @@ func New(table string, baseOpt ...With) *model { if m.client == nil { m.client, m.err = db(m.connection) } + m.enableValidator = true return m } @@ -163,15 +165,12 @@ func (m *model) Insert(record interface{}) (lastId int64, err error) { return 0, err } - for _, v := range m.columnValidator { - for _, h := range v.Handle { - ok, err := h(m, _record, _record[v.Field]) + if m.enableValidator { + for _, v := range m.columnValidator { + err = v(NewValidOpt(withRow(_record), WithModel(m))) if err != nil { return 0, err } - if !ok { - return 0, errors.New("ValidateHandle err " + v.Msg) - } } } @@ -220,15 +219,12 @@ func (m *model) Update(record interface{}, opt ...Option) (ok bool, err error) { return false, errors.New("empty record to update") } - for _, v := range m.columnValidator { - for _, h := range v.Handle { - ok, err := h(m, _record, _record[v.Field]) + if m.enableValidator { + for _, v := range m.columnValidator { + err = v(NewValidOpt(withRow(_record), WithModel(m))) if err != nil { return false, err } - if !ok { - return false, errors.New("ValidateHandle err " + v.Msg) - } } } @@ -262,6 +258,10 @@ func (m *model) Delete(opt ...Option) (ok bool, err error) { opt = append(opt, table(m.table)) if m.fakeDelKey != "" { + m.enableValidator = false + defer func() { + m.enableValidator = true + }() return m.Update(map[string]interface{}{m.fakeDelKey: 1}, opt...) 
} diff --git a/vendor/github.com/daodao97/fly/model_has.go b/vendor/github.com/daodao97/fly/model_has.go index e8e48f6d..085e638e 100644 --- a/vendor/github.com/daodao97/fly/model_has.go +++ b/vendor/github.com/daodao97/fly/model_has.go @@ -1,6 +1,7 @@ package fly import ( + "fmt" "regexp" "strings" @@ -54,6 +55,7 @@ func (m *model) hasOneData(rows []Row, opt HasOpts) ([]Row, error) { } if len(localKeys) == 0 { + _ = logger.Log(LevelDebug, "hasOneData empty localKeys", fmt.Sprintf("%+v", opt)) return rows, nil } @@ -98,6 +100,7 @@ func (m *model) hasManyData(rows []Row, opt HasOpts) ([]Row, error) { } if len(localKeys) == 0 { + _ = logger.Log(LevelDebug, "hasManyData empty localKeys", fmt.Sprintf("%+v", opt)) return rows, nil } diff --git a/vendor/github.com/daodao97/fly/model_with.go b/vendor/github.com/daodao97/fly/model_with.go index a640f598..dc43fa59 100644 --- a/vendor/github.com/daodao97/fly/model_with.go +++ b/vendor/github.com/daodao97/fly/model_with.go @@ -43,23 +43,21 @@ func ColumnHook(columnHook ...Hook) With { } // ColumnValidator while validate data by validator when create or update event -func ColumnValidator(validator ...Validator) With { +func ColumnValidator(validator ...[]Valid) With { return func(b *model) { if b.columnValidator == nil { - b.columnValidator = make([]Validator, 0, len(validator)) + b.columnValidator = make([]Valid, 0, len(validator)) + } + for _, v := range validator { + b.columnValidator = append(b.columnValidator, v...) } - b.columnValidator = append(b.columnValidator, validator...) } } -func Validate(field, msg string, handle ...ValidateHandleMaker) (v Validator) { - v.Field = field - v.Msg = msg - v.Handle = make([]ValidateHandle, 0, len(handle)) - for _, h := range handle { - v.Handle = append(v.Handle, h(field)) +func Validate(field string, vf ...Valid) (v []Valid) { + for _, each := range vf { + v = append(v, ValidWrap(each, NewValidOpt(withField(field)))) } - return v } diff --git a/vendor/github.com/daodao97/fly/validator.go b/vendor/github.com/daodao97/fly/validator.go index 8a646469..f13e2923 100644 --- a/vendor/github.com/daodao97/fly/validator.go +++ b/vendor/github.com/daodao97/fly/validator.go @@ -1,59 +1,146 @@ package fly import ( + "fmt" + + "github.com/pkg/errors" + "github.com/daodao97/fly/interval/xtype" ) -type Validator struct { - Field string - Msg string - Handle []ValidateHandle +type Valid = func(v *ValidInfo) error + +type ValidInfo struct { + Field string + Row map[string]interface{} + Model Model + Label string + Msg string +} + +func mergeOpt(v1, v2 *ValidInfo) *ValidInfo { + if v2.Row != nil { + v1.Row = v2.Row + } + if v2.Model != nil { + v1.Model = v2.Model + } + if v2.Field != "" { + v1.Field = v2.Field + } + if v2.Label != "" { + v1.Label = v2.Label + } + if v2.Msg != "" { + v1.Msg = v2.Msg + } + return v1 +} + +func ValidWrap(valid Valid, v1 *ValidInfo) Valid { + return func(v *ValidInfo) error { + return valid(mergeOpt(v, v1)) + } } -type ValidateHandle = func(m Model, row map[string]interface{}, val interface{}) (ok bool, err error) +type ValidOpt = func(*ValidInfo) -type ValidateHandleMaker = func(field string) ValidateHandle +func WithMsg(msg string) ValidOpt { + return func(v *ValidInfo) { + v.Msg = msg + } +} -// Required field value must exist and not zero -func Required(field string) ValidateHandle { - return func(m Model, row map[string]interface{}, val interface{}) (ok bool, err error) { - v, ok := row[field] +func withField(field string) ValidOpt { + return func(v *ValidInfo) { + v.Field = field + } +} + 
+func WithLabel(label string) ValidOpt { + return func(v *ValidInfo) { + v.Label = label + } +} + +func withRow(row map[string]interface{}) ValidOpt { + return func(v *ValidInfo) { + v.Row = row + } +} + +func WithModel(m Model) ValidOpt { + return func(v *ValidInfo) { + v.Model = m + } +} + +func ExtendValidOpt(v *ValidInfo, opt ...ValidOpt) *ValidInfo { + for _, o := range opt { + o(v) + } + return v +} + +func NewValidOpt(opt ...ValidOpt) *ValidInfo { + v := &ValidInfo{} + for _, o := range opt { + o(v) + } + return v +} + +func msg(msg1, msg2 string) string { + if msg2 != "" { + return msg2 + } + return msg1 +} + +func Required(opt ...ValidOpt) Valid { + v1 := NewValidOpt(opt...) + return ValidWrap(func(v *ValidInfo) error { + val, ok := v.Row[v.Field] if !ok { - return false, nil + return errors.New(msg(fmt.Sprintf("%s not found", v.Field), v.Msg)) } - return xtype.Bool(v), nil - } -} - -// IfRequired if field1 is existed, then field2 must exist and not zero -func IfRequired(field1 string) ValidateHandleMaker { - return func(field string) ValidateHandle { - return func(m Model, row map[string]interface{}, val interface{}) (ok bool, err error) { - h := Required(field1) - ok, err = h(m, row, row[field1]) - if err != nil { - return false, err - } - if !ok { - return true, nil - } - h = Required(field) - return h(m, row, row[field]) + if !xtype.Bool(val) { + return errors.New(msg(fmt.Sprintf("%s value is zero value", v.Field), v.Msg)) } - } + return nil + }, v1) +} + +func IfRequired(ifField string, opt ...ValidOpt) Valid { + v1 := NewValidOpt(opt...) + return ValidWrap(func(v *ValidInfo) error { + if err := Required()(NewValidOpt(withField(ifField), withRow(v.Row), WithModel(v.Model))); err != nil { + return nil + } + + return Required()(NewValidOpt( + withField(v.Field), + withRow(v.Row), + WithModel(v.Model), + WithMsg(msg(fmt.Sprintf("when %s is present, %s is required", ifField, v.Field), v.Msg)), + )) + }, v1) } -// Unique field value must unique in current table -func Unique(field string) ValidateHandle { - return func(m Model, row map[string]interface{}, val interface{}) (ok bool, err error) { - opts := []Option{WhereEq(field, val)} - if id, ok := row[m.PrimaryKey()]; ok { - opts = append(opts, WhereNotEq(m.PrimaryKey(), id)) +func Unique(opt ...ValidOpt) Valid { + v1 := NewValidOpt(opt...) + return ValidWrap(func(v *ValidInfo) error { + opts := []Option{WhereEq(v.Field, v.Row[v.Field])} + if id, ok := v.Row[v.Model.PrimaryKey()]; ok { + opts = append(opts, WhereNotEq(v.Model.PrimaryKey(), id)) } - count, err := m.Count(opts...) + count, err := v.Model.Count(opts...) if err != nil { - return false, err + return err } - return count == 0, nil - } + if count != 0 { + return errors.New(msg("Duplicate data", v.Msg)) + } + return nil + }, v1) } diff --git a/vendor/github.com/denisenkom/go-mssqldb/error.go b/vendor/github.com/denisenkom/go-mssqldb/error.go index 09964b87..e60288a6 100644 --- a/vendor/github.com/denisenkom/go-mssqldb/error.go +++ b/vendor/github.com/denisenkom/go-mssqldb/error.go @@ -27,6 +27,10 @@ func (e Error) Error() string { return "mssql: " + e.Message } +func (e Error) String() string { + return e.Message +} + // SQLErrorNumber returns the SQL Server error number.
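The validator rewrite above replaces fly's struct-based Validator with composable Valid closures configured through functional options. A trimmed-down, self-contained sketch of that pattern (type shapes loosely copied from the diff; the zero-value check here is deliberately simplified and is not fly's xtype.Bool):

```go
package main

import (
	"errors"
	"fmt"
)

// Cut-down copies of the shapes introduced in validator.go above.
type ValidInfo struct {
	Field string
	Row   map[string]interface{}
	Msg   string
}

type Valid func(v *ValidInfo) error
type ValidOpt func(*ValidInfo)

// WithMsg overrides the default error message, as in the diff.
func WithMsg(msg string) ValidOpt { return func(v *ValidInfo) { v.Msg = msg } }

// Required builds a Valid closure; options are applied up front.
func Required(opt ...ValidOpt) Valid {
	base := &ValidInfo{}
	for _, o := range opt {
		o(base)
	}
	return func(v *ValidInfo) error {
		if base.Msg != "" {
			v.Msg = base.Msg
		}
		val, ok := v.Row[v.Field]
		if !ok || val == nil || val == "" { // simplified emptiness check
			if v.Msg != "" {
				return errors.New(v.Msg)
			}
			return fmt.Errorf("%s is required", v.Field)
		}
		return nil
	}
}

func main() {
	check := Required(WithMsg("name must not be empty"))
	fmt.Println(check(&ValidInfo{Field: "name", Row: map[string]interface{}{"id": 1}}))      // custom error
	fmt.Println(check(&ValidInfo{Field: "name", Row: map[string]interface{}{"name": "Ann"}})) // <nil>
}
```

The payoff of this design is visible in the model.go hunks earlier: the model just iterates `for _, v := range m.columnValidator { err = v(...) }` instead of unpacking a struct per field.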
func (e Error) SQLErrorNumber() int32 { return e.Number diff --git a/vendor/github.com/denisenkom/go-mssqldb/token.go b/vendor/github.com/denisenkom/go-mssqldb/token.go index 43039d3d..61c88fcb 100644 --- a/vendor/github.com/denisenkom/go-mssqldb/token.go +++ b/vendor/github.com/denisenkom/go-mssqldb/token.go @@ -787,7 +787,7 @@ func processSingleResponse(ctx context.Context, sess *tdsSession, ch chan tokenS sess.logger.Log(ctx, msdsn.LogMessages, info.Message) } if outs.msgq != nil { - _ = sqlexp.ReturnMessageEnqueue(ctx, outs.msgq, sqlexp.MsgNotice{Message: info.Message}) + _ = sqlexp.ReturnMessageEnqueue(ctx, outs.msgq, sqlexp.MsgNotice{Message: info}) } case tokenReturnValue: nv := parseReturnValue(sess.buf) diff --git a/vendor/github.com/edsrzf/mmap-go/.gitignore b/vendor/github.com/edsrzf/mmap-go/.gitignore index 9aa02c1e..6c694e4b 100644 --- a/vendor/github.com/edsrzf/mmap-go/.gitignore +++ b/vendor/github.com/edsrzf/mmap-go/.gitignore @@ -6,3 +6,6 @@ _obj _test testdata +/.idea +*.iml +/notes.txt diff --git a/vendor/github.com/edsrzf/mmap-go/README.md b/vendor/github.com/edsrzf/mmap-go/README.md index 4cc2bfe1..1ac39f7e 100644 --- a/vendor/github.com/edsrzf/mmap-go/README.md +++ b/vendor/github.com/edsrzf/mmap-go/README.md @@ -1,12 +1,14 @@ mmap-go ======= +![Build Status](https://github.com/edsrzf/mmap-go/actions/workflows/build-test.yml/badge.svg) +[![Go Reference](https://pkg.go.dev/badge/github.com/edsrzf/mmap-go.svg)](https://pkg.go.dev/github.com/edsrzf/mmap-go) mmap-go is a portable mmap package for the [Go programming language](http://golang.org). -It has been tested on Linux (386, amd64), OS X, and Windows (386). It should also -work on other Unix-like platforms, but hasn't been tested with them. I'm interested -to hear about the results. - -I haven't been able to add more features without adding significant complexity, -so mmap-go doesn't support mprotect, mincore, and maybe a few other things. -If you're running on a Unix-like platform and need some of these features, -I suggest Gustavo Niemeyer's [gommap](http://labix.org/gommap). + +Operating System Support +======================== +This package is tested using GitHub Actions on Linux, macOS, and Windows. It should also work on other Unix-like platforms, but hasn't been tested with them. I'm interested to hear about the results. + +I haven't been able to add more features without adding significant complexity, so mmap-go doesn't support `mprotect`, `mincore`, and maybe a few other things. If you're running on a Unix-like platform and need some of these features, I suggest Gustavo Niemeyer's [gommap](http://labix.org/gommap). + +This package compiles on Plan 9, but its functions always return errors. diff --git a/vendor/github.com/edsrzf/mmap-go/mmap_plan9.go b/vendor/github.com/edsrzf/mmap-go/mmap_plan9.go new file mode 100644 index 00000000..e4c33d39 --- /dev/null +++ b/vendor/github.com/edsrzf/mmap-go/mmap_plan9.go @@ -0,0 +1,27 @@ +// Copyright 2020 Evan Shaw. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package mmap + +import "syscall" + +func mmap(len int, inprot, inflags, fd uintptr, off int64) ([]byte, error) { + return nil, syscall.EPLAN9 +} + +func (m MMap) flush() error { + return syscall.EPLAN9 +} + +func (m MMap) lock() error { + return syscall.EPLAN9 +} + +func (m MMap) unlock() error { + return syscall.EPLAN9 +} + +func (m MMap) unmap() error { + return syscall.EPLAN9 +} diff --git a/vendor/github.com/edsrzf/mmap-go/mmap_windows.go b/vendor/github.com/edsrzf/mmap-go/mmap_windows.go index 7910da25..e0d986f7 100644 --- a/vendor/github.com/edsrzf/mmap-go/mmap_windows.go +++ b/vendor/github.com/edsrzf/mmap-go/mmap_windows.go @@ -22,8 +22,9 @@ import ( // We keep this map so that we can get back the original handle from the memory address. type addrinfo struct { - file windows.Handle - mapview windows.Handle + file windows.Handle + mapview windows.Handle + writable bool } var handleLock sync.Mutex @@ -32,13 +33,16 @@ var handleMap = map[uintptr]*addrinfo{} func mmap(len int, prot, flags, hfile uintptr, off int64) ([]byte, error) { flProtect := uint32(windows.PAGE_READONLY) dwDesiredAccess := uint32(windows.FILE_MAP_READ) + writable := false switch { case prot&COPY != 0: flProtect = windows.PAGE_WRITECOPY dwDesiredAccess = windows.FILE_MAP_COPY + writable = true case prot&RDWR != 0: flProtect = windows.PAGE_READWRITE dwDesiredAccess = windows.FILE_MAP_WRITE + writable = true } if prot&EXEC != 0 { flProtect <<= 4 @@ -63,12 +67,14 @@ func mmap(len int, prot, flags, hfile uintptr, off int64) ([]byte, error) { fileOffsetLow := uint32(off & 0xFFFFFFFF) addr, errno := windows.MapViewOfFile(h, dwDesiredAccess, fileOffsetHigh, fileOffsetLow, uintptr(len)) if addr == 0 { + windows.CloseHandle(windows.Handle(h)) return nil, os.NewSyscallError("MapViewOfFile", errno) } handleLock.Lock() handleMap[addr] = &addrinfo{ - file: windows.Handle(hfile), - mapview: h, + file: windows.Handle(hfile), + mapview: h, + writable: writable, } handleLock.Unlock() @@ -96,8 +102,13 @@ func (m MMap) flush() error { return errors.New("unknown base address") } - errno = windows.FlushFileBuffers(handle.file) - return os.NewSyscallError("FlushFileBuffers", errno) + if handle.writable && handle.file != windows.Handle(^uintptr(0)) { + if err := windows.FlushFileBuffers(handle.file); err != nil { + return os.NewSyscallError("FlushFileBuffers", err) + } + } + + return nil } func (m MMap) lock() error { diff --git a/vendor/github.com/emicklei/go-restful/.travis.yml b/vendor/github.com/emicklei/go-restful/.travis.yml deleted file mode 100644 index b22f8f54..00000000 --- a/vendor/github.com/emicklei/go-restful/.travis.yml +++ /dev/null @@ -1,6 +0,0 @@ -language: go - -go: - - 1.x - -script: go test -v \ No newline at end of file diff --git a/vendor/github.com/emicklei/go-restful/Makefile b/vendor/github.com/emicklei/go-restful/Makefile deleted file mode 100644 index b40081cc..00000000 --- a/vendor/github.com/emicklei/go-restful/Makefile +++ /dev/null @@ -1,7 +0,0 @@ -all: test - -test: - go test -v .
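None of the mmap-go changes above (the Plan 9 stubs, the new writable flag, the guarded flush) alter the package's public API. For orientation, typical use looks like this (a sketch assuming a writable scratch file; the file name and size are illustrative, and on Plan 9 every call would return syscall.EPLAN9 per the stubs above):

```go
package main

import (
	"fmt"
	"os"

	mmap "github.com/edsrzf/mmap-go"
)

func main() {
	f, err := os.OpenFile("data.bin", os.O_RDWR|os.O_CREATE, 0644)
	if err != nil {
		panic(err)
	}
	defer f.Close()
	if err := f.Truncate(8); err != nil { // mapping needs a non-empty file
		panic(err)
	}

	// RDWR takes the writable path that the Windows fix above now tracks.
	m, err := mmap.Map(f, mmap.RDWR, 0)
	if err != nil {
		panic(err)
	}
	defer m.Unmap()

	copy(m, []byte("mapped!\n"))
	if err := m.Flush(); err != nil { // on Windows: FlushFileBuffers only when writable
		panic(err)
	}
	fmt.Print(string(m))
}
```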
- -ex: - cd examples && ls *.go | xargs go build -o /tmp/ignore \ No newline at end of file diff --git a/vendor/github.com/emicklei/go-restful/.gitignore b/vendor/github.com/emicklei/go-restful/v3/.gitignore similarity index 99% rename from vendor/github.com/emicklei/go-restful/.gitignore rename to vendor/github.com/emicklei/go-restful/v3/.gitignore index cece7be6..446be09b 100644 --- a/vendor/github.com/emicklei/go-restful/.gitignore +++ b/vendor/github.com/emicklei/go-restful/v3/.gitignore @@ -68,3 +68,4 @@ examples/restful-html-template s.html restful-path-tail +.idea diff --git a/vendor/github.com/emicklei/go-restful/v3/.goconvey b/vendor/github.com/emicklei/go-restful/v3/.goconvey new file mode 100644 index 00000000..8485e986 --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/v3/.goconvey @@ -0,0 +1 @@ +ignore \ No newline at end of file diff --git a/vendor/github.com/emicklei/go-restful/v3/.travis.yml b/vendor/github.com/emicklei/go-restful/v3/.travis.yml new file mode 100644 index 00000000..3a0bf5ff --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/v3/.travis.yml @@ -0,0 +1,13 @@ +language: go + +go: + - 1.x + +before_install: + - go test -v + +script: + - go test -race -coverprofile=coverage.txt -covermode=atomic + +after_success: + - bash <(curl -s https://codecov.io/bash) \ No newline at end of file diff --git a/vendor/github.com/emicklei/go-restful/CHANGES.md b/vendor/github.com/emicklei/go-restful/v3/CHANGES.md similarity index 74% rename from vendor/github.com/emicklei/go-restful/CHANGES.md rename to vendor/github.com/emicklei/go-restful/v3/CHANGES.md index e5252963..38169cfd 100644 --- a/vendor/github.com/emicklei/go-restful/CHANGES.md +++ b/vendor/github.com/emicklei/go-restful/v3/CHANGES.md @@ -1,7 +1,106 @@ -## Change history of go-restful +# Change history of go-restful +## [v3.8.0] - 2022-06-06 + +- use exact matching of allowed domain entries, issue #489 (#493) + - this change fixes [security] Authorization Bypass Through User-Controlled Key + by changing the behaviour of the AllowedDomains setting in the CORS filter. + To support the previous behaviour, the CORS filter type now has an AllowedDomainFunc + callback mechanism which is called when a simple domain match fails. +- add test and fix for POST without body and Content-type, issue #492 (#496) + +- [Minor] Bad practice to have a mix of Receiver types. (#491) + +## [v3.7.2] - 2021-11-24 + +- restored FilterChain (#482 by SVilgelm) + + +## [v3.7.1] - 2021-10-04 + +- fix problem with contentEncodingEnabled setting (#479) + +## [v3.7.0] - 2021-09-24 + +- feat(parameter): adds additional openapi mappings (#478) + +## [v3.6.0] - 2021-09-18 + +- add support for vendor extensions (#477 thx erraggy) + +## [v3.5.2] - 2021-07-14 + +- fix removing absent route from webservice (#472) + +## [v3.5.1] - 2021-04-12 + +- fix handling no match access selected path +- remove obsolete field + +## [v3.5.0] - 2021-04-10 + +- add check for wildcard (#463) in CORS + +- add access to Route from Request, issue #459 (#462) + +## [v3.4.0] - 2020-11-10 + +- Added OPTIONS to WebService + +## [v3.3.2] - 2020-01-23 + +- Fixed duplicate compression in dispatch. #449 + + +## [v3.3.1] - 2020-08-31 + +- Added check on writer to prevent compression of response twice.
#447 + +## [v3.3.0] - 2020-08-19 + +- Enable content encoding on Handle and ServeHTTP (#446) +- List available representations in 406 body (#437) +- Convert to string using rune() (#443) + +## [v3.2.0] - 2020-06-21 + +- 405 Method Not Allowed must have Allow header (#436) (thx Bracken ) +- add field allowedMethodsWithoutContentType (#424) + +## [v3.1.0] + +- support describing response headers (#426) +- fix openapi examples (#425) + +v3.0.0 + +- fix: use request/response resulting from filter chain +- add Go module + Module consumer should use github.com/emicklei/go-restful/v3 as import path + +v2.10.0 + +- support for Custom Verbs (thanks Vinci Xu <277040271@qq.com>) +- fixed static example (thanks Arthur ) +- simplify code (thanks Christian Muehlhaeuser ) +- added JWT HMAC with SHA-512 authentication code example (thanks Amim Knabben ) + +v2.9.6 + +- small optimization in filter code + +v2.11.1 + +- fix WriteError return value (#415) + +v2.11.0 + +- allow prefix and suffix in path variable expression (#414) + +v2.9.6 + +- support google custom verb (#413) v2.9.5 + - fix panic in Response.WriteError if err == nil v2.9.4 diff --git a/vendor/github.com/emicklei/go-restful/LICENSE b/vendor/github.com/emicklei/go-restful/v3/LICENSE similarity index 100% rename from vendor/github.com/emicklei/go-restful/LICENSE rename to vendor/github.com/emicklei/go-restful/v3/LICENSE diff --git a/vendor/github.com/emicklei/go-restful/v3/Makefile b/vendor/github.com/emicklei/go-restful/v3/Makefile new file mode 100644 index 00000000..16d0b80b --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/v3/Makefile @@ -0,0 +1,8 @@ +all: test + +test: + go vet . + go test -cover -v . + +ex: + find ./examples -type f -name "*.go" | xargs -I {} go build -o /tmp/ignore {} \ No newline at end of file diff --git a/vendor/github.com/emicklei/go-restful/README.md b/vendor/github.com/emicklei/go-restful/v3/README.md similarity index 76% rename from vendor/github.com/emicklei/go-restful/README.md rename to vendor/github.com/emicklei/go-restful/v3/README.md index f52c25ac..23166d3b 100644 --- a/vendor/github.com/emicklei/go-restful/README.md +++ b/vendor/github.com/emicklei/go-restful/v3/README.md @@ -4,9 +4,10 @@ package for building REST-style Web Services using Google Go [![Build Status](https://travis-ci.org/emicklei/go-restful.png)](https://travis-ci.org/emicklei/go-restful) [![Go Report Card](https://goreportcard.com/badge/github.com/emicklei/go-restful)](https://goreportcard.com/report/github.com/emicklei/go-restful) -[![GoDoc](https://godoc.org/github.com/emicklei/go-restful?status.svg)](https://godoc.org/github.com/emicklei/go-restful) +[![GoDoc](https://godoc.org/github.com/emicklei/go-restful?status.svg)](https://pkg.go.dev/github.com/emicklei/go-restful) +[![codecov](https://codecov.io/gh/emicklei/go-restful/branch/master/graph/badge.svg)](https://codecov.io/gh/emicklei/go-restful) -- [Code examples](https://github.com/emicklei/go-restful/tree/master/examples) +- [Code examples use v3](https://github.com/emicklei/go-restful/tree/v3/examples) REST asks developers to use HTTP methods explicitly and in a way that's consistent with the protocol definition. This basic REST design principle establishes a one-to-one mapping between create, read, update, and delete (CRUD) operations and HTTP methods.
According to this mapping: @@ -18,6 +19,28 @@ REST asks developers to use HTTP methods explicitly and in a way that's consiste - PATCH = Update partial content of a resource - OPTIONS = Get information about the communication options for the request URI +### Usage + +#### Without Go Modules + +All versions up to `v2.*.*` (on the master) are not supporting Go modules. + +``` +import ( + restful "github.com/emicklei/go-restful" +) +``` + +#### Using Go Modules + +As of version `v3.0.0` (on the v3 branch), this package supports Go modules. + +``` +import ( + restful "github.com/emicklei/go-restful/v3" +) +``` + ### Example ```Go @@ -39,15 +62,15 @@ func (u UserResource) findUser(request *restful.Request, response *restful.Respo } ``` -[Full API of a UserResource](https://github.com/emicklei/go-restful/tree/master/examples/restful-user-resource.go) +[Full API of a UserResource](https://github.com/emicklei/go-restful/blob/v3/examples/user-resource/restful-user-resource.go) ### Features -- Routes for request → function mapping with path parameter (e.g. {id}) support +- Routes for request → function mapping with path parameter (e.g. {id} but also prefix_{var} and {var}_suffix) support - Configurable router: - - (default) Fast routing algorithm that allows static elements, regular expressions and dynamic parameters in the URL path (e.g. /meetings/{id} or /static/{subpath:*} + - (default) Fast routing algorithm that allows static elements, [google custom method](https://cloud.google.com/apis/design/custom_methods), regular expressions and dynamic parameters in the URL path (e.g. /resource/name:customVerb, /meetings/{id} or /static/{subpath:*}) - Routing algorithm after [JSR311](http://jsr311.java.net/nonav/releases/1.1/spec/spec.html) that is implemented using (but does **not** accept) regular expressions -- Request API for reading structs from JSON/XML and accesing parameters (path,query,header) +- Request API for reading structs from JSON/XML and accessing parameters (path,query,header) - Response API for writing structs to JSON/XML and setting headers - Customizable encoding using EntityReaderWriter registration - Filters for intercepting the request → response flow on Service or Route level @@ -71,12 +94,11 @@ There are several hooks to customize the behavior of the go-restful package. - Trace logging - Compression - Encoders for other serializers -- Use [jsoniter](https://github.com/json-iterator/go) by build this package using a tag, e.g. `go build -tags=jsoniter .` - -TODO: write examples of these. +- Use [jsoniter](https://github.com/json-iterator/go) by build this package using a tag, e.g. `go build -tags=jsoniter .` ## Resources +- [Example programs](./examples) - [Example posted on blog](http://ernestmicklei.com/2012/11/go-restful-first-working-example/) - [Design explained on blog](http://ernestmicklei.com/2012/11/go-restful-api-design/) - [sourcegraph](https://sourcegraph.com/github.com/emicklei/go-restful) @@ -85,4 +107,4 @@ TODO: write examples of these. Type ```git shortlog -s``` for a full list of contributors. -© 2012 - 2018, http://ernestmicklei.com. MIT License. Contributions are welcome. +© 2012 - 2022, http://ernestmicklei.com. MIT License. Contributions are welcome. 
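To make the v2 to v3 import-path switch described in the README above concrete, here is a minimal service built against the v3 module path (a sketch; the resource path, port, and handler body are illustrative, not from the patch):

```go
package main

import (
	"net/http"

	restful "github.com/emicklei/go-restful/v3"
)

// hello echoes the {user-id} path parameter as JSON.
func hello(req *restful.Request, resp *restful.Response) {
	id := req.PathParameter("user-id")
	resp.WriteAsJson(map[string]string{"user": id})
}

func main() {
	ws := new(restful.WebService)
	ws.Path("/users").Produces(restful.MIME_JSON)
	ws.Route(ws.GET("/{user-id}").To(hello))
	restful.Add(ws) // register on the default container
	http.ListenAndServe(":8080", nil)
}
```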
diff --git a/vendor/github.com/emicklei/go-restful/v3/SECURITY.md b/vendor/github.com/emicklei/go-restful/v3/SECURITY.md new file mode 100644 index 00000000..810d3b51 --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/v3/SECURITY.md @@ -0,0 +1,13 @@ +# Security Policy + +## Supported Versions + +| Version | Supported | +| ------- | ------------------ | +| v3.7.x | :white_check_mark: | +| < v3.0.1 | :x: | + +## Reporting a Vulnerability + +Create an Issue and put the label `[security]` in the title of the issue. +Valid reported security issues are expected to be solved within a week. diff --git a/vendor/github.com/emicklei/go-restful/Srcfile b/vendor/github.com/emicklei/go-restful/v3/Srcfile similarity index 100% rename from vendor/github.com/emicklei/go-restful/Srcfile rename to vendor/github.com/emicklei/go-restful/v3/Srcfile diff --git a/vendor/github.com/emicklei/go-restful/compress.go b/vendor/github.com/emicklei/go-restful/v3/compress.go similarity index 92% rename from vendor/github.com/emicklei/go-restful/compress.go rename to vendor/github.com/emicklei/go-restful/v3/compress.go index 220b3771..1ff239f9 100644 --- a/vendor/github.com/emicklei/go-restful/compress.go +++ b/vendor/github.com/emicklei/go-restful/v3/compress.go @@ -83,7 +83,11 @@ func (c *CompressingResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error } // WantsCompressedResponse reads the Accept-Encoding header to see if and which encoding is requested. -func wantsCompressedResponse(httpRequest *http.Request) (bool, string) { +// It also inspects the httpWriter whether its content-encoding is already set (non-empty). +func wantsCompressedResponse(httpRequest *http.Request, httpWriter http.ResponseWriter) (bool, string) { + if contentEncoding := httpWriter.Header().Get(HEADER_ContentEncoding); contentEncoding != "" { + return false, "" + } header := httpRequest.Header.Get(HEADER_AcceptEncoding) gi := strings.Index(header, ENCODING_GZIP) zi := strings.Index(header, ENCODING_DEFLATE) diff --git a/vendor/github.com/emicklei/go-restful/compressor_cache.go b/vendor/github.com/emicklei/go-restful/v3/compressor_cache.go similarity index 100% rename from vendor/github.com/emicklei/go-restful/compressor_cache.go rename to vendor/github.com/emicklei/go-restful/v3/compressor_cache.go diff --git a/vendor/github.com/emicklei/go-restful/compressor_pools.go b/vendor/github.com/emicklei/go-restful/v3/compressor_pools.go similarity index 100% rename from vendor/github.com/emicklei/go-restful/compressor_pools.go rename to vendor/github.com/emicklei/go-restful/v3/compressor_pools.go diff --git a/vendor/github.com/emicklei/go-restful/compressors.go b/vendor/github.com/emicklei/go-restful/v3/compressors.go similarity index 100% rename from vendor/github.com/emicklei/go-restful/compressors.go rename to vendor/github.com/emicklei/go-restful/v3/compressors.go diff --git a/vendor/github.com/emicklei/go-restful/constants.go b/vendor/github.com/emicklei/go-restful/v3/constants.go similarity index 100% rename from vendor/github.com/emicklei/go-restful/constants.go rename to vendor/github.com/emicklei/go-restful/v3/constants.go diff --git a/vendor/github.com/emicklei/go-restful/container.go b/vendor/github.com/emicklei/go-restful/v3/container.go similarity index 78% rename from vendor/github.com/emicklei/go-restful/container.go rename to vendor/github.com/emicklei/go-restful/v3/container.go index 061a8d71..dd56246d 100644 --- a/vendor/github.com/emicklei/go-restful/container.go +++ 
b/vendor/github.com/emicklei/go-restful/v3/container.go @@ -14,7 +14,7 @@ import ( "strings" "sync" - "github.com/emicklei/go-restful/log" + "github.com/emicklei/go-restful/v3/log" ) // Container holds a collection of WebServices and a http.ServeMux to dispatch http requests. @@ -185,6 +185,11 @@ func logStackOnRecover(panicReason interface{}, httpWriter http.ResponseWriter) // when a ServiceError is returned during route selection. Default implementation // calls resp.WriteErrorString(err.Code, err.Message) func writeServiceError(err ServiceError, req *Request, resp *Response) { + for header, values := range err.Header { + for _, value := range values { + resp.Header().Add(header, value) + } + } resp.WriteErrorString(err.Code, err.Message) } @@ -201,6 +206,7 @@ func (c *Container) Dispatch(httpWriter http.ResponseWriter, httpRequest *http.R // Dispatch the incoming Http Request to a matching WebService. func (c *Container) dispatch(httpWriter http.ResponseWriter, httpRequest *http.Request) { + // so we can assign a compressing one later writer := httpWriter // CompressingResponseWriter should be closed after all operations are done @@ -231,28 +237,8 @@ func (c *Container) dispatch(httpWriter http.ResponseWriter, httpRequest *http.R c.webServices, httpRequest) }() - - // Detect if compression is needed - // assume without compression, test for override - contentEncodingEnabled := c.contentEncodingEnabled - if route != nil && route.contentEncodingEnabled != nil { - contentEncodingEnabled = *route.contentEncodingEnabled - } - if contentEncodingEnabled { - doCompress, encoding := wantsCompressedResponse(httpRequest) - if doCompress { - var err error - writer, err = NewCompressingResponseWriter(httpWriter, encoding) - if err != nil { - log.Print("unable to install compressor: ", err) - httpWriter.WriteHeader(http.StatusInternalServerError) - return - } - } - } - if err != nil { - // a non-200 response has already been written + // a non-200 response (may be compressed) has already been written // run container filters anyway ; they should not touch the response... 
chain := FilterChain{Filters: c.containerFilters, Target: func(req *Request, resp *Response) { switch err.(type) { @@ -265,6 +251,29 @@ func (c *Container) dispatch(httpWriter http.ResponseWriter, httpRequest *http.R chain.ProcessFilter(NewRequest(httpRequest), NewResponse(writer)) return } + + // Unless httpWriter is already an CompressingResponseWriter see if we need to install one + if _, isCompressing := httpWriter.(*CompressingResponseWriter); !isCompressing { + // Detect if compression is needed + // assume without compression, test for override + contentEncodingEnabled := c.contentEncodingEnabled + if route != nil && route.contentEncodingEnabled != nil { + contentEncodingEnabled = *route.contentEncodingEnabled + } + if contentEncodingEnabled { + doCompress, encoding := wantsCompressedResponse(httpRequest, httpWriter) + if doCompress { + var err error + writer, err = NewCompressingResponseWriter(httpWriter, encoding) + if err != nil { + log.Print("unable to install compressor: ", err) + httpWriter.WriteHeader(http.StatusInternalServerError) + return + } + } + } + } + pathProcessor, routerProcessesPath := c.router.(PathProcessor) if !routerProcessesPath { pathProcessor = defaultPathProcessor{} @@ -272,16 +281,18 @@ func (c *Container) dispatch(httpWriter http.ResponseWriter, httpRequest *http.R pathParams := pathProcessor.ExtractParameters(route, webService, httpRequest.URL.Path) wrappedRequest, wrappedResponse := route.wrapRequestResponse(writer, httpRequest, pathParams) // pass through filters (if any) - if len(c.containerFilters)+len(webService.filters)+len(route.Filters) > 0 { + if size := len(c.containerFilters) + len(webService.filters) + len(route.Filters); size > 0 { // compose filter chain - allFilters := []FilterFunction{} + allFilters := make([]FilterFunction, 0, size) allFilters = append(allFilters, c.containerFilters...) allFilters = append(allFilters, webService.filters...) allFilters = append(allFilters, route.Filters...) 
- chain := FilterChain{Filters: allFilters, Target: func(req *Request, resp *Response) { - // handle request by route after passing all filters - route.Function(wrappedRequest, wrappedResponse) - }} + chain := FilterChain{ + Filters: allFilters, + Target: route.Function, + ParameterDocs: route.ParameterDocs, + Operation: route.Operation, + } chain.ProcessFilter(wrappedRequest, wrappedResponse) } else { // no filters, handle request by route @@ -299,13 +310,75 @@ func fixedPrefixPath(pathspec string) string { } // ServeHTTP implements net/http.Handler therefore a Container can be a Handler in a http.Server -func (c *Container) ServeHTTP(httpwriter http.ResponseWriter, httpRequest *http.Request) { - c.ServeMux.ServeHTTP(httpwriter, httpRequest) +func (c *Container) ServeHTTP(httpWriter http.ResponseWriter, httpRequest *http.Request) { + // Skip, if content encoding is disabled + if !c.contentEncodingEnabled { + c.ServeMux.ServeHTTP(httpWriter, httpRequest) + return + } + // content encoding is enabled + + // Skip, if httpWriter is already an CompressingResponseWriter + if _, ok := httpWriter.(*CompressingResponseWriter); ok { + c.ServeMux.ServeHTTP(httpWriter, httpRequest) + return + } + + writer := httpWriter + // CompressingResponseWriter should be closed after all operations are done + defer func() { + if compressWriter, ok := writer.(*CompressingResponseWriter); ok { + compressWriter.Close() + } + }() + + doCompress, encoding := wantsCompressedResponse(httpRequest, httpWriter) + if doCompress { + var err error + writer, err = NewCompressingResponseWriter(httpWriter, encoding) + if err != nil { + log.Print("unable to install compressor: ", err) + httpWriter.WriteHeader(http.StatusInternalServerError) + return + } + } + + c.ServeMux.ServeHTTP(writer, httpRequest) } // Handle registers the handler for the given pattern. If a handler already exists for pattern, Handle panics. func (c *Container) Handle(pattern string, handler http.Handler) { - c.ServeMux.Handle(pattern, handler) + c.ServeMux.Handle(pattern, http.HandlerFunc(func(httpWriter http.ResponseWriter, httpRequest *http.Request) { + // Skip, if httpWriter is already an CompressingResponseWriter + if _, ok := httpWriter.(*CompressingResponseWriter); ok { + handler.ServeHTTP(httpWriter, httpRequest) + return + } + + writer := httpWriter + + // CompressingResponseWriter should be closed after all operations are done + defer func() { + if compressWriter, ok := writer.(*CompressingResponseWriter); ok { + compressWriter.Close() + } + }() + + if c.contentEncodingEnabled { + doCompress, encoding := wantsCompressedResponse(httpRequest, httpWriter) + if doCompress { + var err error + writer, err = NewCompressingResponseWriter(httpWriter, encoding) + if err != nil { + log.Print("unable to install compressor: ", err) + httpWriter.WriteHeader(http.StatusInternalServerError) + return + } + } + } + + handler.ServeHTTP(writer, httpRequest) + })) } // HandleWithFilter registers the handler for the given pattern. 
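The dispatch, ServeHTTP, and Handle changes above all apply one recurring guard: never install a CompressingResponseWriter when the writer is already compressing or a Content-Encoding is already set. The same idea in a plain net/http middleware (a sketch; gzipResponseWriter here is a deliberately simplified stand-in, not go-restful's implementation):

```go
package main

import (
	"compress/gzip"
	"io"
	"net/http"
	"strings"
)

// gzipResponseWriter routes the body through a gzip.Writer.
type gzipResponseWriter struct {
	http.ResponseWriter
	gz io.Writer
}

func (w gzipResponseWriter) Write(b []byte) (int, error) { return w.gz.Write(b) }

// withGzip compresses responses, but only when the client asked for it
// and no outer layer has already set a Content-Encoding.
func withGzip(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if !strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") ||
			w.Header().Get("Content-Encoding") != "" {
			next.ServeHTTP(w, r) // pass through: avoids double compression
			return
		}
		gz := gzip.NewWriter(w)
		defer gz.Close()
		w.Header().Set("Content-Encoding", "gzip")
		next.ServeHTTP(gzipResponseWriter{ResponseWriter: w, gz: gz}, r)
	})
}

func main() {
	http.ListenAndServe(":8080", withGzip(http.HandlerFunc(
		func(w http.ResponseWriter, r *http.Request) {
			io.WriteString(w, "hello, compressed world\n")
		})))
}
```

Stacking two such middlewares is safe: the inner one sees Content-Encoding already set and steps aside, which is exactly the bug class issue #449 ("duplicate compression in dispatch") fixed.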
@@ -319,7 +392,7 @@ func (c *Container) HandleWithFilter(pattern string, handler http.Handler) { } chain := FilterChain{Filters: c.containerFilters, Target: func(req *Request, resp *Response) { - handler.ServeHTTP(httpResponse, httpRequest) + handler.ServeHTTP(resp, req.Request) }} chain.ProcessFilter(NewRequest(httpRequest), NewResponse(httpResponse)) } diff --git a/vendor/github.com/emicklei/go-restful/cors_filter.go b/vendor/github.com/emicklei/go-restful/v3/cors_filter.go similarity index 81% rename from vendor/github.com/emicklei/go-restful/cors_filter.go rename to vendor/github.com/emicklei/go-restful/v3/cors_filter.go index 1efeef07..9d18dfb7 100644 --- a/vendor/github.com/emicklei/go-restful/cors_filter.go +++ b/vendor/github.com/emicklei/go-restful/v3/cors_filter.go @@ -18,9 +18,22 @@ import ( // http://enable-cors.org/server.html // http://www.html5rocks.com/en/tutorials/cors/#toc-handling-a-not-so-simple-request type CrossOriginResourceSharing struct { - ExposeHeaders []string // list of Header names - AllowedHeaders []string // list of Header names - AllowedDomains []string // list of allowed values for Http Origin. An allowed value can be a regular expression to support subdomain matching. If empty all are allowed. + ExposeHeaders []string // list of Header names + + // AllowedHeaders is a list of Header names. Checking is case-insensitive. + // The list may contain the special wildcard string ".*" ; all is allowed + AllowedHeaders []string + + // AllowedDomains is a list of allowed values for Http Origin. + // The list may contain the special wildcard string ".*" ; all is allowed + // If empty all are allowed. + AllowedDomains []string + + // AllowedDomainFunc is optional and is a function that will do the check + // when the origin is not part of the AllowedDomains and it does not contain the wildcard ".*". + AllowedDomainFunc func(origin string) bool + + // AllowedMethods is either empty or has a list of http method names. Checking is case-insensitive. AllowedMethods []string MaxAge int // number of seconds before requiring new Options request CookiesAllowed bool @@ -119,36 +132,24 @@ func (c CrossOriginResourceSharing) isOriginAllowed(origin string) bool { if len(origin) == 0 { return false } + lowerOrigin := strings.ToLower(origin) if len(c.AllowedDomains) == 0 { + if c.AllowedDomainFunc != nil { + return c.AllowedDomainFunc(lowerOrigin) + } return true } - allowed := false + // exact match on each allowed domain for _, domain := range c.AllowedDomains { - if domain == origin { - allowed = true - break + if domain == ".*" || strings.ToLower(domain) == lowerOrigin { + return true } } - - if !allowed { - if len(c.allowedOriginPatterns) == 0 { - // compile allowed domains to allowed origin patterns - allowedOriginRegexps, err := compileRegexps(c.AllowedDomains) - if err != nil { - return false - } - c.allowedOriginPatterns = allowedOriginRegexps - } - - for _, pattern := range c.allowedOriginPatterns { - if allowed = pattern.MatchString(origin); allowed { - break - } - } + if c.AllowedDomainFunc != nil { + return c.AllowedDomainFunc(origin) } - - return allowed + return false } func (c CrossOriginResourceSharing) setAllowOriginHeader(req *Request, resp *Response) { @@ -184,19 +185,9 @@ func (c CrossOriginResourceSharing) isValidAccessControlRequestHeader(header str if strings.ToLower(each) == strings.ToLower(header) { return true } - } - return false -} - -// Take a list of strings and compile them into a list of regular expressions.
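(The compileRegexps helper that comment introduces is deleted in the next hunk.) The CORS security fix above swaps regex matching of the Origin header for exact, case-insensitive comparison with an optional callback fallback. Distilled into a standalone check (a sketch with local names; it omits the filter's full empty-list semantics):

```go
package main

import (
	"fmt"
	"strings"
)

// isOriginAllowed mirrors the rewritten check: exact match only,
// ".*" as an explicit wildcard, and an optional callback fallback.
func isOriginAllowed(origin string, allowed []string, fallback func(string) bool) bool {
	if origin == "" {
		return false
	}
	lower := strings.ToLower(origin)
	for _, domain := range allowed {
		if domain == ".*" || strings.ToLower(domain) == lower {
			return true
		}
	}
	if fallback != nil {
		return fallback(lower)
	}
	return false
}

func main() {
	allowed := []string{"https://example.com"}
	// Under the old behaviour, an entry like "example.com" was compiled as a
	// regex and could also match "https://evil-example.com"; exact matching cannot.
	fmt.Println(isOriginAllowed("https://example.com", allowed, nil))      // true
	fmt.Println(isOriginAllowed("https://evil-example.com", allowed, nil)) // false
}
```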
-func compileRegexps(regexpStrings []string) ([]*regexp.Regexp, error) { - regexps := []*regexp.Regexp{} - for _, regexpStr := range regexpStrings { - r, err := regexp.Compile(regexpStr) - if err != nil { - return regexps, err + if each == "*" { + return true } - regexps = append(regexps, r) } - return regexps, nil + return false } diff --git a/vendor/github.com/emicklei/go-restful/curly.go b/vendor/github.com/emicklei/go-restful/v3/curly.go similarity index 93% rename from vendor/github.com/emicklei/go-restful/curly.go rename to vendor/github.com/emicklei/go-restful/v3/curly.go index 14d5b76b..ba1fc5d5 100644 --- a/vendor/github.com/emicklei/go-restful/curly.go +++ b/vendor/github.com/emicklei/go-restful/v3/curly.go @@ -47,7 +47,7 @@ func (c CurlyRouter) SelectRoute( func (c CurlyRouter) selectRoutes(ws *WebService, requestTokens []string) sortableCurlyRoutes { candidates := make(sortableCurlyRoutes, 0, 8) for _, each := range ws.routes { - matches, paramCount, staticCount := c.matchesRouteByPathTokens(each.pathParts, requestTokens) + matches, paramCount, staticCount := c.matchesRouteByPathTokens(each.pathParts, requestTokens, each.hasCustomVerb) if matches { candidates.add(curlyRoute{each, paramCount, staticCount}) // TODO make sure Routes() return pointers? } @@ -57,7 +57,7 @@ func (c CurlyRouter) selectRoutes(ws *WebService, requestTokens []string) sortab } // matchesRouteByPathTokens computes whether it matches, howmany parameters do match and what the number of static path elements are. -func (c CurlyRouter) matchesRouteByPathTokens(routeTokens, requestTokens []string) (matches bool, paramCount int, staticCount int) { +func (c CurlyRouter) matchesRouteByPathTokens(routeTokens, requestTokens []string, routeHasCustomVerb bool) (matches bool, paramCount int, staticCount int) { if len(routeTokens) < len(requestTokens) { // proceed in matching only if last routeToken is wildcard count := len(routeTokens) @@ -72,6 +72,15 @@ func (c CurlyRouter) matchesRouteByPathTokens(routeTokens, requestTokens []strin return false, 0, 0 } requestToken := requestTokens[i] + if routeHasCustomVerb && hasCustomVerb(routeToken){ + if !isMatchCustomVerb(routeToken, requestToken) { + return false, 0, 0 + } + staticCount++ + requestToken = removeCustomVerb(requestToken) + routeToken = removeCustomVerb(routeToken) + } + if strings.HasPrefix(routeToken, "{") { paramCount++ if colon := strings.Index(routeToken, ":"); colon != -1 { diff --git a/vendor/github.com/emicklei/go-restful/curly_route.go b/vendor/github.com/emicklei/go-restful/v3/curly_route.go similarity index 100% rename from vendor/github.com/emicklei/go-restful/curly_route.go rename to vendor/github.com/emicklei/go-restful/v3/curly_route.go diff --git a/vendor/github.com/emicklei/go-restful/v3/custom_verb.go b/vendor/github.com/emicklei/go-restful/v3/custom_verb.go new file mode 100644 index 00000000..bfc17efd --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/v3/custom_verb.go @@ -0,0 +1,29 @@ +package restful + +import ( + "fmt" + "regexp" +) + +var ( + customVerbReg = regexp.MustCompile(":([A-Za-z]+)$") +) + +func hasCustomVerb(routeToken string) bool { + return customVerbReg.MatchString(routeToken) +} + +func isMatchCustomVerb(routeToken string, pathToken string) bool { + rs := customVerbReg.FindStringSubmatch(routeToken) + if len(rs) < 2 { + return false + } + + customVerb := rs[1] + specificVerbReg := regexp.MustCompile(fmt.Sprintf(":%s$", customVerb)) + return specificVerbReg.MatchString(pathToken) +} + +func removeCustomVerb(str string) 
string { + return customVerbReg.ReplaceAllString(str, "") +} diff --git a/vendor/github.com/emicklei/go-restful/doc.go b/vendor/github.com/emicklei/go-restful/v3/doc.go similarity index 95% rename from vendor/github.com/emicklei/go-restful/doc.go rename to vendor/github.com/emicklei/go-restful/v3/doc.go index f7c16b01..69b13057 100644 --- a/vendor/github.com/emicklei/go-restful/doc.go +++ b/vendor/github.com/emicklei/go-restful/v3/doc.go @@ -28,7 +28,7 @@ This package has the logic to find the best matching Route and if found, call it The (*Request, *Response) arguments provide functions for reading information from the request and writing information back to the response. -See the example https://github.com/emicklei/go-restful/blob/master/examples/restful-user-resource.go with a full implementation. +See the example https://github.com/emicklei/go-restful/blob/v3/examples/user-resource/restful-user-resource.go with a full implementation. Regular expression matching Routes @@ -82,7 +82,7 @@ These are processed before calling the function associated with the Route. // install 2 chained route filters (processed before calling findUser) ws.Route(ws.GET("/{user-id}").Filter(routeLogging).Filter(NewCountFilter().routeCounter).To(findUser)) -See the example https://github.com/emicklei/go-restful/blob/master/examples/restful-filters.go with full implementations. +See the example https://github.com/emicklei/go-restful/blob/v3/examples/filters/restful-filters.go with full implementations. Response Encoding @@ -93,7 +93,7 @@ Two encodings are supported: gzip and deflate. To enable this for all responses: If a Http request includes the Accept-Encoding header then the response content will be compressed using the specified encoding. Alternatively, you can create a Filter that performs the encoding and install it per WebService or Route. -See the example https://github.com/emicklei/go-restful/blob/master/examples/restful-encoding-filter.go +See the example https://github.com/emicklei/go-restful/blob/v3/examples/encoding/restful-encoding-filter.go OPTIONS support diff --git a/vendor/github.com/emicklei/go-restful/entity_accessors.go b/vendor/github.com/emicklei/go-restful/v3/entity_accessors.go similarity index 100% rename from vendor/github.com/emicklei/go-restful/entity_accessors.go rename to vendor/github.com/emicklei/go-restful/v3/entity_accessors.go diff --git a/vendor/github.com/emicklei/go-restful/v3/extensions.go b/vendor/github.com/emicklei/go-restful/v3/extensions.go new file mode 100644 index 00000000..5023fa04 --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/v3/extensions.go @@ -0,0 +1,21 @@ +package restful + +// Copyright 2021 Ernest Micklei. All rights reserved. +// Use of this source code is governed by a license +// that can be found in the LICENSE file. + +// ExtensionProperties provides storage of vendor extensions for entities +type ExtensionProperties struct { + // Extensions vendor extensions used to describe extra functionality + // (https://swagger.io/docs/specification/2-0/swagger-extensions/) + Extensions map[string]interface{} +} + +// AddExtension adds or updates a key=value pair to the extension map. 
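custom_verb.go implements Google-style custom verbs: a trailing `:verb` on the final path token must be present literally in the request and is stripped before parameter extraction (see also the curly.go and path_processor.go hunks). A sketch of a route using one, with a hypothetical handler name:

```go
package main

import (
	"net/http"

	restful "github.com/emicklei/go-restful/v3"
)

// undeleteUser echoes the id extracted after the ":undelete" verb is stripped.
func undeleteUser(req *restful.Request, resp *restful.Response) {
	resp.WriteHeaderAndEntity(http.StatusOK, req.PathParameter("id"))
}

func main() {
	ws := new(restful.WebService)
	// POST /users/42:undelete matches; a plain POST /users/42 does not
	ws.Route(ws.POST("/users/{id}:undelete").To(undeleteUser))
	restful.Add(ws)
	http.ListenAndServe(":8080", nil)
}
```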
+func (ep *ExtensionProperties) AddExtension(key string, value interface{}) { + if ep.Extensions == nil { + ep.Extensions = map[string]interface{}{key: value} + } else { + ep.Extensions[key] = value + } +} diff --git a/vendor/github.com/emicklei/go-restful/filter.go b/vendor/github.com/emicklei/go-restful/v3/filter.go similarity index 79% rename from vendor/github.com/emicklei/go-restful/filter.go rename to vendor/github.com/emicklei/go-restful/v3/filter.go index c23bfb59..fd88c536 100644 --- a/vendor/github.com/emicklei/go-restful/filter.go +++ b/vendor/github.com/emicklei/go-restful/v3/filter.go @@ -6,9 +6,11 @@ package restful // FilterChain is a request scoped object to process one or more filters before calling the target RouteFunction. type FilterChain struct { - Filters []FilterFunction // ordered list of FilterFunction - Index int // index into filters that is currently in progress - Target RouteFunction // function to call after passing all filters + Filters []FilterFunction // ordered list of FilterFunction + Index int // index into filters that is currently in progress + Target RouteFunction // function to call after passing all filters + ParameterDocs []*Parameter // the parameter docs for the route + Operation string // the name of the operation } // ProcessFilter passes the request,response pair through the next of Filters. diff --git a/vendor/github.com/emicklei/go-restful/json.go b/vendor/github.com/emicklei/go-restful/v3/json.go similarity index 100% rename from vendor/github.com/emicklei/go-restful/json.go rename to vendor/github.com/emicklei/go-restful/v3/json.go diff --git a/vendor/github.com/emicklei/go-restful/jsoniter.go b/vendor/github.com/emicklei/go-restful/v3/jsoniter.go similarity index 100% rename from vendor/github.com/emicklei/go-restful/jsoniter.go rename to vendor/github.com/emicklei/go-restful/v3/jsoniter.go diff --git a/vendor/github.com/emicklei/go-restful/jsr311.go b/vendor/github.com/emicklei/go-restful/v3/jsr311.go similarity index 89% rename from vendor/github.com/emicklei/go-restful/jsr311.go rename to vendor/github.com/emicklei/go-restful/v3/jsr311.go index 3ede1891..07a0c91e 100644 --- a/vendor/github.com/emicklei/go-restful/jsr311.go +++ b/vendor/github.com/emicklei/go-restful/v3/jsr311.go @@ -9,6 +9,7 @@ import ( "fmt" "net/http" "sort" + "strings" ) // RouterJSR311 implements the flow for matching Requests to Routes (and consequently Resource Functions) @@ -98,7 +99,18 @@ func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*R if trace { traceLogger.Printf("no Route found (in %d routes) that matches HTTP method %s\n", len(previous), httpRequest.Method) } - return nil, NewError(http.StatusMethodNotAllowed, "405: Method Not Allowed") + allowed := []string{} + allowedLoop: + for _, candidate := range previous { + for _, method := range allowed { + if method == candidate.Method { + continue allowedLoop + } + } + allowed = append(allowed, candidate.Method) + } + header := http.Header{"Allow": []string{strings.Join(allowed, ", ")}} + return nil, NewErrorWithHeader(http.StatusMethodNotAllowed, "405: Method Not Allowed", header) } // content-type @@ -135,7 +147,24 @@ func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*R if trace { traceLogger.Printf("no Route found (from %d) that matches HTTP Accept: %s\n", len(previous), accept) } - return nil, NewError(http.StatusNotAcceptable, "406: Not Acceptable") + available := []string{} + for _, candidate := range previous { + available = append(available, 
candidate.Produces...) + } + // if POST,PUT,PATCH without body + method, length := httpRequest.Method, httpRequest.Header.Get("Content-Length") + if (method == http.MethodPost || + method == http.MethodPut || + method == http.MethodPatch) && length == "" { + return nil, NewError( + http.StatusUnsupportedMediaType, + fmt.Sprintf("415: Unsupported Media Type\n\nAvailable representations: %s", strings.Join(available, ", ")), + ) + } + return nil, NewError( + http.StatusNotAcceptable, + fmt.Sprintf("406: Not Acceptable\n\nAvailable representations: %s", strings.Join(available, ", ")), + ) } // return r.bestMatchByMedia(outputMediaOk, contentType, accept), nil return candidates[0], nil diff --git a/vendor/github.com/emicklei/go-restful/log/log.go b/vendor/github.com/emicklei/go-restful/v3/log/log.go similarity index 100% rename from vendor/github.com/emicklei/go-restful/log/log.go rename to vendor/github.com/emicklei/go-restful/v3/log/log.go diff --git a/vendor/github.com/emicklei/go-restful/logger.go b/vendor/github.com/emicklei/go-restful/v3/logger.go similarity index 95% rename from vendor/github.com/emicklei/go-restful/logger.go rename to vendor/github.com/emicklei/go-restful/v3/logger.go index 6595df00..29202726 100644 --- a/vendor/github.com/emicklei/go-restful/logger.go +++ b/vendor/github.com/emicklei/go-restful/v3/logger.go @@ -4,7 +4,7 @@ package restful // Use of this source code is governed by a license // that can be found in the LICENSE file. import ( - "github.com/emicklei/go-restful/log" + "github.com/emicklei/go-restful/v3/log" ) var trace bool = false diff --git a/vendor/github.com/emicklei/go-restful/mime.go b/vendor/github.com/emicklei/go-restful/v3/mime.go similarity index 100% rename from vendor/github.com/emicklei/go-restful/mime.go rename to vendor/github.com/emicklei/go-restful/v3/mime.go diff --git a/vendor/github.com/emicklei/go-restful/options_filter.go b/vendor/github.com/emicklei/go-restful/v3/options_filter.go similarity index 100% rename from vendor/github.com/emicklei/go-restful/options_filter.go rename to vendor/github.com/emicklei/go-restful/v3/options_filter.go diff --git a/vendor/github.com/emicklei/go-restful/parameter.go b/vendor/github.com/emicklei/go-restful/v3/parameter.go similarity index 58% rename from vendor/github.com/emicklei/go-restful/parameter.go rename to vendor/github.com/emicklei/go-restful/v3/parameter.go index e8793304..0e658af5 100644 --- a/vendor/github.com/emicklei/go-restful/parameter.go +++ b/vendor/github.com/emicklei/go-restful/v3/parameter.go @@ -1,5 +1,7 @@ package restful +import "sort" + // Copyright 2013 Ernest Micklei. All rights reserved. // Use of this source code is governed by a license // that can be found in the LICENSE file. @@ -52,13 +54,25 @@ type Parameter struct { // ParameterData represents the state of a Parameter. // It is made public to make it accessible to e.g. the Swagger package. type ParameterData struct { + ExtensionProperties Name, Description, DataType, DataFormat string Kind int Required bool - AllowableValues map[string]string - AllowMultiple bool - DefaultValue string - CollectionFormat string + // AllowableValues is deprecated. 
Use PossibleValues instead + AllowableValues map[string]string + PossibleValues []string + AllowMultiple bool + AllowEmptyValue bool + DefaultValue string + CollectionFormat string + Pattern string + Minimum *float64 + Maximum *float64 + MinLength *int64 + MaxLength *int64 + MinItems *int64 + MaxItems *int64 + UniqueItems bool } // Data returns the state of the Parameter @@ -106,9 +120,38 @@ func (p *Parameter) AllowMultiple(multiple bool) *Parameter { return p } -// AllowableValues sets the allowableValues field and returns the receiver +// AddExtension adds or updates a key=value pair to the extension map +func (p *Parameter) AddExtension(key string, value interface{}) *Parameter { + p.data.AddExtension(key, value) + return p +} + +// AllowEmptyValue sets the AllowEmptyValue field and returns the receiver +func (p *Parameter) AllowEmptyValue(multiple bool) *Parameter { + p.data.AllowEmptyValue = multiple + return p +} + +// AllowableValues is deprecated. Use PossibleValues instead. Both will be set. func (p *Parameter) AllowableValues(values map[string]string) *Parameter { p.data.AllowableValues = values + + allowableSortedKeys := make([]string, 0, len(values)) + for k := range values { + allowableSortedKeys = append(allowableSortedKeys, k) + } + sort.Strings(allowableSortedKeys) + + p.data.PossibleValues = make([]string, 0, len(values)) + for _, k := range allowableSortedKeys { + p.data.PossibleValues = append(p.data.PossibleValues, values[k]) + } + return p +} + +// PossibleValues sets the possible values field and returns the receiver +func (p *Parameter) PossibleValues(values []string) *Parameter { + p.data.PossibleValues = values return p } @@ -141,3 +184,51 @@ func (p *Parameter) CollectionFormat(format CollectionFormat) *Parameter { p.data.CollectionFormat = format.String() return p } + +// Pattern sets the pattern field and returns the receiver +func (p *Parameter) Pattern(pattern string) *Parameter { + p.data.Pattern = pattern + return p +} + +// Minimum sets the minimum field and returns the receiver +func (p *Parameter) Minimum(minimum float64) *Parameter { + p.data.Minimum = &minimum + return p +} + +// Maximum sets the maximum field and returns the receiver +func (p *Parameter) Maximum(maximum float64) *Parameter { + p.data.Maximum = &maximum + return p +} + +// MinLength sets the minLength field and returns the receiver +func (p *Parameter) MinLength(minLength int64) *Parameter { + p.data.MinLength = &minLength + return p +} + +// MaxLength sets the maxLength field and returns the receiver +func (p *Parameter) MaxLength(maxLength int64) *Parameter { + p.data.MaxLength = &maxLength + return p +} + +// MinItems sets the minItems field and returns the receiver +func (p *Parameter) MinItems(minItems int64) *Parameter { + p.data.MinItems = &minItems + return p +} + +// MaxItems sets the maxItems field and returns the receiver +func (p *Parameter) MaxItems(maxItems int64) *Parameter { + p.data.MaxItems = &maxItems + return p +} + +// UniqueItems sets the uniqueItems field and returns the receiver +func (p *Parameter) UniqueItems(uniqueItems bool) *Parameter { + p.data.UniqueItems = uniqueItems + return p +} diff --git a/vendor/github.com/emicklei/go-restful/path_expression.go b/vendor/github.com/emicklei/go-restful/v3/path_expression.go similarity index 100% rename from vendor/github.com/emicklei/go-restful/path_expression.go rename to vendor/github.com/emicklei/go-restful/v3/path_expression.go diff --git a/vendor/github.com/emicklei/go-restful/path_processor.go 
b/vendor/github.com/emicklei/go-restful/v3/path_processor.go similarity index 79% rename from vendor/github.com/emicklei/go-restful/path_processor.go rename to vendor/github.com/emicklei/go-restful/v3/path_processor.go index 357c723a..14157324 100644 --- a/vendor/github.com/emicklei/go-restful/path_processor.go +++ b/vendor/github.com/emicklei/go-restful/v3/path_processor.go @@ -29,7 +29,12 @@ func (d defaultPathProcessor) ExtractParameters(r *Route, _ *WebService, urlPath } else { value = urlParts[i] } - if strings.HasPrefix(key, "{") { // path-parameter + if r.hasCustomVerb && hasCustomVerb(key) { + key = removeCustomVerb(key) + value = removeCustomVerb(value) + } + + if strings.Index(key, "{") > -1 { // path-parameter if colon := strings.Index(key, ":"); colon != -1 { // extract by regex regPart := key[colon+1 : len(key)-1] @@ -42,7 +47,13 @@ func (d defaultPathProcessor) ExtractParameters(r *Route, _ *WebService, urlPath } } else { // without enclosing {} - pathParameters[key[1:len(key)-1]] = value + startIndex := strings.Index(key, "{") + endKeyIndex := strings.Index(key, "}") + + suffixLength := len(key) - endKeyIndex - 1 + endValueIndex := len(value) - suffixLength + + pathParameters[key[startIndex+1:endKeyIndex]] = value[startIndex:endValueIndex] } } } diff --git a/vendor/github.com/emicklei/go-restful/request.go b/vendor/github.com/emicklei/go-restful/v3/request.go similarity index 85% rename from vendor/github.com/emicklei/go-restful/request.go rename to vendor/github.com/emicklei/go-restful/v3/request.go index a20730fe..5725a075 100644 --- a/vendor/github.com/emicklei/go-restful/request.go +++ b/vendor/github.com/emicklei/go-restful/v3/request.go @@ -13,10 +13,10 @@ var defaultRequestContentType string // Request is a wrapper for a http Request that provides convenience methods type Request struct { - Request *http.Request - pathParameters map[string]string - attributes map[string]interface{} // for storing request-scoped values - selectedRoutePath string // root path + route path that matched the request, e.g. /meetings/{id}/attendees + Request *http.Request + pathParameters map[string]string + attributes map[string]interface{} // for storing request-scoped values + selectedRoute *Route // is nil when no route was matched } func NewRequest(httpRequest *http.Request) *Request { @@ -113,6 +113,20 @@ func (r Request) Attribute(name string) interface{} { } // SelectedRoutePath root path + route path that matched the request, e.g. /meetings/{id}/attendees +// If no route was matched then return an empty string. func (r Request) SelectedRoutePath() string { - return r.selectedRoutePath + if r.selectedRoute == nil { + return "" + } + // skip creating an accessor + return r.selectedRoute.Path +} + +// SelectedRoute returns a reader to access the selected Route by the container +// Returns nil if no route was matched. 
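The reworked else-branch in ExtractParameters also handles a parameter embedded in a token with static text around it: the braces are located inside the key, and the length of the suffix after the closing brace is trimmed from the request value. A sketch of what this permits, assuming the default CurlyRouter and a hypothetical handler getFile:

```go
// hypothetical route; the "{name}" span is cut out of the request token
ws.Route(ws.GET("/files/{name}.json").To(getFile))
// GET /files/report.json → req.PathParameter("name") == "report"
```

Note that the suffix is honored during extraction only; at matching time the curly router still treats the whole token as one parameter.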
+func (r Request) SelectedRoute() RouteReader { + if r.selectedRoute == nil { + return nil + } + return routeAccessor{route: r.selectedRoute} } diff --git a/vendor/github.com/emicklei/go-restful/response.go b/vendor/github.com/emicklei/go-restful/v3/response.go similarity index 96% rename from vendor/github.com/emicklei/go-restful/response.go rename to vendor/github.com/emicklei/go-restful/v3/response.go index fbb48f2d..8f0b56aa 100644 --- a/vendor/github.com/emicklei/go-restful/response.go +++ b/vendor/github.com/emicklei/go-restful/v3/response.go @@ -174,15 +174,16 @@ func (r *Response) WriteHeaderAndJson(status int, value interface{}, contentType return writeJSON(r, status, contentType, value) } -// WriteError write the http status and the error string on the response. err can be nil. -func (r *Response) WriteError(httpStatus int, err error) error { +// WriteError writes the http status and the error string on the response. err can be nil. +// Return an error if writing was not successful. +func (r *Response) WriteError(httpStatus int, err error) (writeErr error) { r.err = err if err == nil { - r.WriteErrorString(httpStatus, "") + writeErr = r.WriteErrorString(httpStatus, "") } else { - r.WriteErrorString(httpStatus, err.Error()) + writeErr = r.WriteErrorString(httpStatus, err.Error()) } - return err + return writeErr } // WriteServiceError is a convenience method for a responding with a status and a ServiceError diff --git a/vendor/github.com/emicklei/go-restful/route.go b/vendor/github.com/emicklei/go-restful/v3/route.go similarity index 84% rename from vendor/github.com/emicklei/go-restful/route.go rename to vendor/github.com/emicklei/go-restful/v3/route.go index 6d15dbf6..193f4a6b 100644 --- a/vendor/github.com/emicklei/go-restful/route.go +++ b/vendor/github.com/emicklei/go-restful/v3/route.go @@ -19,6 +19,7 @@ type RouteSelectionConditionFunction func(httpRequest *http.Request) bool // Route binds a HTTP Method,Path,Consumes combination to a RouteFunction. type Route struct { + ExtensionProperties Method string Produces []string Consumes []string @@ -49,35 +50,33 @@ type Route struct { //Overrides the container.contentEncodingEnabled contentEncodingEnabled *bool + + // indicate route path has custom verb + hasCustomVerb bool + + // if a request does not include a content-type header then + // depending on the method, it may return a 415 Unsupported Media + // Must have uppercase HTTP Method names such as GET,HEAD,OPTIONS,... 
+ allowedMethodsWithoutContentType []string } // Initialize for Route func (r *Route) postBuild() { r.pathParts = tokenizePath(r.Path) + r.hasCustomVerb = hasCustomVerb(r.Path) } // Create Request and Response from their http versions func (r *Route) wrapRequestResponse(httpWriter http.ResponseWriter, httpRequest *http.Request, pathParams map[string]string) (*Request, *Response) { wrappedRequest := NewRequest(httpRequest) wrappedRequest.pathParameters = pathParams - wrappedRequest.selectedRoutePath = r.Path + wrappedRequest.selectedRoute = r wrappedResponse := NewResponse(httpWriter) wrappedResponse.requestAccept = httpRequest.Header.Get(HEADER_Accept) wrappedResponse.routeProduces = r.Produces return wrappedRequest, wrappedResponse } -// dispatchWithFilters call the function after passing through its own filters -func (r *Route) dispatchWithFilters(wrappedRequest *Request, wrappedResponse *Response) { - if len(r.Filters) > 0 { - chain := FilterChain{Filters: r.Filters, Target: r.Function} - chain.ProcessFilter(wrappedRequest, wrappedResponse) - } else { - // unfiltered - r.Function(wrappedRequest, wrappedResponse) - } -} - func stringTrimSpaceCutset(r rune) bool { return r == ' ' } @@ -121,8 +120,17 @@ func (r Route) matchesContentType(mimeTypes string) bool { if len(mimeTypes) == 0 { // idempotent methods with (most-likely or guaranteed) empty content match missing Content-Type m := r.Method - if m == "GET" || m == "HEAD" || m == "OPTIONS" || m == "DELETE" || m == "TRACE" { - return true + // if route specifies less or non-idempotent methods then use that + if len(r.allowedMethodsWithoutContentType) > 0 { + for _, each := range r.allowedMethodsWithoutContentType { + if m == each { + return true + } + } + } else { + if m == "GET" || m == "HEAD" || m == "OPTIONS" || m == "DELETE" || m == "TRACE" { + return true + } } // proceed with default mimeTypes = MIME_OCTET @@ -160,11 +168,11 @@ func tokenizePath(path string) []string { } // for debugging -func (r Route) String() string { +func (r *Route) String() string { return r.Method + " " + r.Path } // EnableContentEncoding (default=false) allows for GZIP or DEFLATE encoding of responses. Overrides the container.contentEncodingEnabled value. -func (r Route) EnableContentEncoding(enabled bool) { +func (r *Route) EnableContentEncoding(enabled bool) { r.contentEncodingEnabled = &enabled } diff --git a/vendor/github.com/emicklei/go-restful/route_builder.go b/vendor/github.com/emicklei/go-restful/v3/route_builder.go similarity index 75% rename from vendor/github.com/emicklei/go-restful/route_builder.go rename to vendor/github.com/emicklei/go-restful/v3/route_builder.go index 0fccf61e..23641b6d 100644 --- a/vendor/github.com/emicklei/go-restful/route_builder.go +++ b/vendor/github.com/emicklei/go-restful/v3/route_builder.go @@ -12,19 +12,20 @@ import ( "strings" "sync/atomic" - "github.com/emicklei/go-restful/log" + "github.com/emicklei/go-restful/v3/log" ) // RouteBuilder is a helper to construct Routes. 
type RouteBuilder struct { - rootPath string - currentPath string - produces []string - consumes []string - httpMethod string // required - function RouteFunction // required - filters []FilterFunction - conditions []RouteSelectionConditionFunction + rootPath string + currentPath string + produces []string + consumes []string + httpMethod string // required + function RouteFunction // required + filters []FilterFunction + conditions []RouteSelectionConditionFunction + allowedMethodsWithoutContentType []string // see Route typeNameHandleFunc TypeNameHandleFunction // required @@ -37,6 +38,7 @@ type RouteBuilder struct { errorMap map[int]ResponseError defaultResponse *ResponseError metadata map[string]interface{} + extensions map[string]interface{} deprecated bool contentEncodingEnabled *bool } @@ -176,6 +178,15 @@ func (b *RouteBuilder) Returns(code int, message string, model interface{}) *Rou return b } +// ReturnsWithHeaders is similar to Returns, but can specify response headers +func (b *RouteBuilder) ReturnsWithHeaders(code int, message string, model interface{}, headers map[string]Header) *RouteBuilder { + b.Returns(code, message, model) + err := b.errorMap[code] + err.Headers = headers + b.errorMap[code] = err + return b +} + // DefaultReturns is a special Returns call that sets the default of the response. func (b *RouteBuilder) DefaultReturns(message string, model interface{}) *RouteBuilder { b.defaultResponse = &ResponseError{ @@ -194,20 +205,57 @@ func (b *RouteBuilder) Metadata(key string, value interface{}) *RouteBuilder { return b } +// AddExtension adds or updates a key=value pair to the extensions map. +func (b *RouteBuilder) AddExtension(key string, value interface{}) *RouteBuilder { + if b.extensions == nil { + b.extensions = map[string]interface{}{} + } + b.extensions[key] = value + return b +} + // Deprecate sets the value of deprecated to true. Deprecated routes have a special UI treatment to warn against use func (b *RouteBuilder) Deprecate() *RouteBuilder { b.deprecated = true return b } +// AllowedMethodsWithoutContentType overrides the default list GET,HEAD,OPTIONS,DELETE,TRACE +// If a request does not include a content-type header then +// depending on the method, it may return a 415 Unsupported Media. +// Must have uppercase HTTP Method names such as GET,HEAD,OPTIONS,... +func (b *RouteBuilder) AllowedMethodsWithoutContentType(methods []string) *RouteBuilder { + b.allowedMethodsWithoutContentType = methods + return b +} + // ResponseError represents a response; not necessarily an error. 
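The new builder options compose fluently. A sketch assuming a hypothetical handler createImport, combining the body-less-POST override, a vendor extension, and the numeric validation setters added to parameter.go above:

```go
ws.Route(ws.POST("/imports").To(createImport).
	// let a POST without a Content-Type header match this route
	AllowedMethodsWithoutContentType([]string{"POST"}).
	// stored on the Route via the embedded ExtensionProperties
	AddExtension("x-audit", true).
	// Minimum/Maximum are recorded as *float64 in ParameterData
	Param(ws.QueryParameter("priority", "import priority").
		DataType("integer").Minimum(1).Maximum(10)))
```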
type ResponseError struct { + ExtensionProperties Code int Message string Model interface{} + Headers map[string]Header IsDefault bool } +// Header describes a header for a response of the API +// +// For more information: http://goo.gl/8us55a#headerObject +type Header struct { + *Items + Description string +} + +// Items describe swagger simple schemas for headers +type Items struct { + Type string + Format string + Items *Items + CollectionFormat string + Default interface{} +} + func (b *RouteBuilder) servicePath(path string) *RouteBuilder { b.rootPath = path return b @@ -276,27 +324,29 @@ func (b *RouteBuilder) Build() Route { operationName = nameOfFunction(b.function) } route := Route{ - Method: b.httpMethod, - Path: concatPath(b.rootPath, b.currentPath), - Produces: b.produces, - Consumes: b.consumes, - Function: b.function, - Filters: b.filters, - If: b.conditions, - relativePath: b.currentPath, - pathExpr: pathExpr, - Doc: b.doc, - Notes: b.notes, - Operation: operationName, - ParameterDocs: b.parameters, - ResponseErrors: b.errorMap, - DefaultResponse: b.defaultResponse, - ReadSample: b.readSample, - WriteSample: b.writeSample, - Metadata: b.metadata, - Deprecated: b.deprecated, - contentEncodingEnabled: b.contentEncodingEnabled, + Method: b.httpMethod, + Path: concatPath(b.rootPath, b.currentPath), + Produces: b.produces, + Consumes: b.consumes, + Function: b.function, + Filters: b.filters, + If: b.conditions, + relativePath: b.currentPath, + pathExpr: pathExpr, + Doc: b.doc, + Notes: b.notes, + Operation: operationName, + ParameterDocs: b.parameters, + ResponseErrors: b.errorMap, + DefaultResponse: b.defaultResponse, + ReadSample: b.readSample, + WriteSample: b.writeSample, + Metadata: b.metadata, + Deprecated: b.deprecated, + contentEncodingEnabled: b.contentEncodingEnabled, + allowedMethodsWithoutContentType: b.allowedMethodsWithoutContentType, } + route.Extensions = b.extensions route.postBuild() return route } diff --git a/vendor/github.com/emicklei/go-restful/v3/route_reader.go b/vendor/github.com/emicklei/go-restful/v3/route_reader.go new file mode 100644 index 00000000..c9f4ee75 --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/v3/route_reader.go @@ -0,0 +1,66 @@ +package restful + +// Copyright 2021 Ernest Micklei. All rights reserved. +// Use of this source code is governed by a license +// that can be found in the LICENSE file. 
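Header and Items mirror the Swagger header object, so documented responses can now declare typed headers. A sketch using ReturnsWithHeaders, with a hypothetical Export model and getExport handler:

```go
// Export is a hypothetical response model defined elsewhere.
ws.Route(ws.GET("/exports/{id}").To(getExport).
	ReturnsWithHeaders(http.StatusOK, "OK", Export{}, map[string]restful.Header{
		"ETag": {
			Items:       &restful.Items{Type: "string"},
			Description: "version tag of the export representation",
		},
	}))
```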
+ +type RouteReader interface { + Method() string + Consumes() []string + Path() string + Doc() string + Notes() string + Operation() string + ParameterDocs() []*Parameter + // Returns a copy + Metadata() map[string]interface{} + Deprecated() bool +} + +type routeAccessor struct { + route *Route +} + +func (r routeAccessor) Method() string { + return r.route.Method +} +func (r routeAccessor) Consumes() []string { + return r.route.Consumes[:] +} +func (r routeAccessor) Path() string { + return r.route.Path +} +func (r routeAccessor) Doc() string { + return r.route.Doc +} +func (r routeAccessor) Notes() string { + return r.route.Notes +} +func (r routeAccessor) Operation() string { + return r.route.Operation +} +func (r routeAccessor) ParameterDocs() []*Parameter { + return r.route.ParameterDocs[:] +} + +// Returns a copy +func (r routeAccessor) Metadata() map[string]interface{} { + return copyMap(r.route.Metadata) +} +func (r routeAccessor) Deprecated() bool { + return r.route.Deprecated +} + +// https://stackoverflow.com/questions/23057785/how-to-copy-a-map +func copyMap(m map[string]interface{}) map[string]interface{} { + cp := make(map[string]interface{}) + for k, v := range m { + vm, ok := v.(map[string]interface{}) + if ok { + cp[k] = copyMap(vm) + } else { + cp[k] = v + } + } + return cp +} diff --git a/vendor/github.com/emicklei/go-restful/router.go b/vendor/github.com/emicklei/go-restful/v3/router.go similarity index 100% rename from vendor/github.com/emicklei/go-restful/router.go rename to vendor/github.com/emicklei/go-restful/v3/router.go diff --git a/vendor/github.com/emicklei/go-restful/service_error.go b/vendor/github.com/emicklei/go-restful/v3/service_error.go similarity index 70% rename from vendor/github.com/emicklei/go-restful/service_error.go rename to vendor/github.com/emicklei/go-restful/v3/service_error.go index 62d1108b..a4157546 100644 --- a/vendor/github.com/emicklei/go-restful/service_error.go +++ b/vendor/github.com/emicklei/go-restful/v3/service_error.go @@ -4,12 +4,16 @@ package restful // Use of this source code is governed by a license // that can be found in the LICENSE file. -import "fmt" +import ( + "fmt" + "net/http" +) // ServiceError is a transport object to pass information about a non-Http error occurred in a WebService while processing a request. 
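Because WebService and Route filters run after route selection, they can consult the new reader. A sketch of a logging filter, assuming the v3 import alias restful and the standard log package:

```go
// auditFilter logs which operation was selected, then continues the chain.
func auditFilter(req *restful.Request, resp *restful.Response, chain *restful.FilterChain) {
	if sel := req.SelectedRoute(); sel != nil { // nil when no route matched
		log.Printf("op=%s path=%s", sel.Operation(), sel.Path())
	}
	chain.ProcessFilter(req, resp)
}

// attached per route, e.g.:
// ws.Route(ws.GET("/users/{id}").Filter(auditFilter).To(findUser))
```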
type ServiceError struct { Code int Message string + Header http.Header } // NewError returns a ServiceError using the code and reason @@ -17,6 +21,11 @@ func NewError(code int, message string) ServiceError { return ServiceError{Code: code, Message: message} } +// NewErrorWithHeader returns a ServiceError using the code, reason and header +func NewErrorWithHeader(code int, message string, header http.Header) ServiceError { + return ServiceError{Code: code, Message: message, Header: header} +} + // Error returns a text representation of the service error func (s ServiceError) Error() string { return fmt.Sprintf("[ServiceError:%v] %v", s.Code, s.Message) diff --git a/vendor/github.com/emicklei/go-restful/web_service.go b/vendor/github.com/emicklei/go-restful/v3/web_service.go similarity index 93% rename from vendor/github.com/emicklei/go-restful/web_service.go rename to vendor/github.com/emicklei/go-restful/v3/web_service.go index 77ba9a8c..0bf5d1e5 100644 --- a/vendor/github.com/emicklei/go-restful/web_service.go +++ b/vendor/github.com/emicklei/go-restful/v3/web_service.go @@ -6,7 +6,7 @@ import ( "reflect" "sync" - "github.com/emicklei/go-restful/log" + "github.com/emicklei/go-restful/v3/log" ) // Copyright 2013 Ernest Micklei. All rights reserved. @@ -176,22 +176,20 @@ func (w *WebService) Route(builder *RouteBuilder) *WebService { // RemoveRoute removes the specified route, looks for something that matches 'path' and 'method' func (w *WebService) RemoveRoute(path, method string) error { - if !w.dynamicRoutes { - return errors.New("dynamic routes are not enabled.") - } - w.routesLock.Lock() - defer w.routesLock.Unlock() - newRoutes := make([]Route, (len(w.routes) - 1)) - current := 0 - for ix := range w.routes { - if w.routes[ix].Method == method && w.routes[ix].Path == path { - continue - } - newRoutes[current] = w.routes[ix] - current = current + 1 - } - w.routes = newRoutes - return nil + if !w.dynamicRoutes { + return errors.New("dynamic routes are not enabled.") + } + w.routesLock.Lock() + defer w.routesLock.Unlock() + newRoutes := []Route{} + for _, route := range w.routes { + if route.Method == method && route.Path == path { + continue + } + newRoutes = append(newRoutes, route) + } + w.routes = newRoutes + return nil } // Method creates a new RouteBuilder and initialize its http method @@ -288,3 +286,8 @@ func (w *WebService) PATCH(subPath string) *RouteBuilder { func (w *WebService) DELETE(subPath string) *RouteBuilder { return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("DELETE").Path(subPath) } + +// OPTIONS is a shortcut for .Method("OPTIONS").Path(subPath) +func (w *WebService) OPTIONS(subPath string) *RouteBuilder { + return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("OPTIONS").Path(subPath) +} diff --git a/vendor/github.com/emicklei/go-restful/web_service_container.go b/vendor/github.com/emicklei/go-restful/v3/web_service_container.go similarity index 100% rename from vendor/github.com/emicklei/go-restful/web_service_container.go rename to vendor/github.com/emicklei/go-restful/v3/web_service_container.go diff --git a/vendor/github.com/fsnotify/fsnotify/.editorconfig b/vendor/github.com/fsnotify/fsnotify/.editorconfig new file mode 100644 index 00000000..fad89585 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/.editorconfig @@ -0,0 +1,12 @@ +root = true + +[*.go] +indent_style = tab +indent_size = 4 +insert_final_newline = true + +[*.{yml,yaml}] +indent_style = space +indent_size = 
2 +insert_final_newline = true +trim_trailing_whitespace = true diff --git a/vendor/github.com/fsnotify/fsnotify/.gitattributes b/vendor/github.com/fsnotify/fsnotify/.gitattributes new file mode 100644 index 00000000..32f1001b --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/.gitattributes @@ -0,0 +1 @@ +go.sum linguist-generated diff --git a/vendor/github.com/fsnotify/fsnotify/.gitignore b/vendor/github.com/fsnotify/fsnotify/.gitignore new file mode 100644 index 00000000..4cd0cbaf --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/.gitignore @@ -0,0 +1,6 @@ +# Setup a Global .gitignore for OS and editor generated files: +# https://help.github.com/articles/ignoring-files +# git config --global core.excludesfile ~/.gitignore_global + +.vagrant +*.sublime-project diff --git a/vendor/github.com/fsnotify/fsnotify/.mailmap b/vendor/github.com/fsnotify/fsnotify/.mailmap new file mode 100644 index 00000000..a04f2907 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/.mailmap @@ -0,0 +1,2 @@ +Chris Howey +Nathan Youngman <4566+nathany@users.noreply.github.com> diff --git a/vendor/github.com/fsnotify/fsnotify/AUTHORS b/vendor/github.com/fsnotify/fsnotify/AUTHORS new file mode 100644 index 00000000..6cbabe5e --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/AUTHORS @@ -0,0 +1,62 @@ +# Names should be added to this file as +# Name or Organization +# The email address is not required for organizations. + +# You can update this list using the following command: +# +# $ (head -n10 AUTHORS && git shortlog -se | sed -E 's/^\s+[0-9]+\t//') | tee AUTHORS + +# Please keep the list sorted. + +Aaron L +Adrien Bustany +Alexey Kazakov +Amit Krishnan +Anmol Sethi +Bjørn Erik Pedersen +Brian Goff +Bruno Bigras +Caleb Spare +Case Nelson +Chris Howey +Christoffer Buchholz +Daniel Wagner-Hall +Dave Cheney +Eric Lin +Evan Phoenix +Francisco Souza +Gautam Dey +Hari haran +Ichinose Shogo +Johannes Ebke +John C Barstow +Kelvin Fo +Ken-ichirou MATSUZAWA +Matt Layher +Matthias Stone +Nathan Youngman +Nickolai Zeldovich +Oliver Bristow +Patrick +Paul Hammond +Pawel Knap +Pieter Droogendijk +Pratik Shinde +Pursuit92 +Riku Voipio +Rob Figueiredo +Rodrigo Chiossi +Slawek Ligus +Soge Zhang +Tiffany Jernigan +Tilak Sharma +Tobias Klauser +Tom Payne +Travis Cline +Tudor Golubenco +Vahe Khachikyan +Yukang +bronze1man +debrando +henrikedwards +铁哥 diff --git a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md new file mode 100644 index 00000000..cc01c08f --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md @@ -0,0 +1,357 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [Unreleased] + +## [1.5.4] - 2022-04-25 + +* Windows: add missing defer to `Watcher.WatchList` [#447](https://github.com/fsnotify/fsnotify/pull/447) +* go.mod: use latest x/sys [#444](https://github.com/fsnotify/fsnotify/pull/444) +* Fix compilation for OpenBSD [#443](https://github.com/fsnotify/fsnotify/pull/443) + +## [1.5.3] - 2022-04-22 + +* This version is retracted. 
An incorrect branch is published accidentally [#445](https://github.com/fsnotify/fsnotify/issues/445) + +## [1.5.2] - 2022-04-21 + +* Add a feature to return the directories and files that are being monitored [#374](https://github.com/fsnotify/fsnotify/pull/374) +* Fix potential crash on windows if `raw.FileNameLength` exceeds `syscall.MAX_PATH` [#361](https://github.com/fsnotify/fsnotify/pull/361) +* Allow build on unsupported GOOS [#424](https://github.com/fsnotify/fsnotify/pull/424) +* Don't set `poller.fd` twice in `newFdPoller` [#406](https://github.com/fsnotify/fsnotify/pull/406) +* fix go vet warnings: call to `(*T).Fatalf` from a non-test goroutine [#416](https://github.com/fsnotify/fsnotify/pull/416) + +## [1.5.1] - 2021-08-24 + +* Revert Add AddRaw to not follow symlinks [#394](https://github.com/fsnotify/fsnotify/pull/394) + +## [1.5.0] - 2021-08-20 + +* Go: Increase minimum required version to Go 1.12 [#381](https://github.com/fsnotify/fsnotify/pull/381) +* Feature: Add AddRaw method which does not follow symlinks when adding a watch [#289](https://github.com/fsnotify/fsnotify/pull/298) +* Windows: Follow symlinks by default like on all other systems [#289](https://github.com/fsnotify/fsnotify/pull/289) +* CI: Use GitHub Actions for CI and cover go 1.12-1.17 + [#378](https://github.com/fsnotify/fsnotify/pull/378) + [#381](https://github.com/fsnotify/fsnotify/pull/381) + [#385](https://github.com/fsnotify/fsnotify/pull/385) +* Go 1.14+: Fix unsafe pointer conversion [#325](https://github.com/fsnotify/fsnotify/pull/325) + +## [1.4.7] - 2018-01-09 + +* BSD/macOS: Fix possible deadlock on closing the watcher on kqueue (thanks @nhooyr and @glycerine) +* Tests: Fix missing verb on format string (thanks @rchiossi) +* Linux: Fix deadlock in Remove (thanks @aarondl) +* Linux: Watch.Add improvements (avoid race, fix consistency, reduce garbage) (thanks @twpayne) +* Docs: Moved FAQ into the README (thanks @vahe) +* Linux: Properly handle inotify's IN_Q_OVERFLOW event (thanks @zeldovich) +* Docs: replace references to OS X with macOS + +## [1.4.2] - 2016-10-10 + +* Linux: use InotifyInit1 with IN_CLOEXEC to stop leaking a file descriptor to a child process when using fork/exec [#178](https://github.com/fsnotify/fsnotify/pull/178) (thanks @pattyshack) + +## [1.4.1] - 2016-10-04 + +* Fix flaky inotify stress test on Linux [#177](https://github.com/fsnotify/fsnotify/pull/177) (thanks @pattyshack) + +## [1.4.0] - 2016-10-01 + +* add a String() method to Event.Op [#165](https://github.com/fsnotify/fsnotify/pull/165) (thanks @oozie) + +## [1.3.1] - 2016-06-28 + +* Windows: fix for double backslash when watching the root of a drive [#151](https://github.com/fsnotify/fsnotify/issues/151) (thanks @brunoqc) + +## [1.3.0] - 2016-04-19 + +* Support linux/arm64 by [patching](https://go-review.googlesource.com/#/c/21971/) x/sys/unix and switching to to it from syscall (thanks @suihkulokki) [#135](https://github.com/fsnotify/fsnotify/pull/135) + +## [1.2.10] - 2016-03-02 + +* Fix golint errors in windows.go [#121](https://github.com/fsnotify/fsnotify/pull/121) (thanks @tiffanyfj) + +## [1.2.9] - 2016-01-13 + +kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsnotify/pull/111) (thanks @bep) + +## [1.2.8] - 2015-12-17 + +* kqueue: fix race condition in Close [#105](https://github.com/fsnotify/fsnotify/pull/105) (thanks @djui for reporting the issue and @ppknap for writing a failing test) +* inotify: fix race in test +* enable race detection for continuous integration (Linux, 
Mac, Windows) + +## [1.2.5] - 2015-10-17 + +* inotify: use epoll_create1 for arm64 support (requires Linux 2.6.27 or later) [#100](https://github.com/fsnotify/fsnotify/pull/100) (thanks @suihkulokki) +* inotify: fix path leaks [#73](https://github.com/fsnotify/fsnotify/pull/73) (thanks @chamaken) +* kqueue: watch for rename events on subdirectories [#83](https://github.com/fsnotify/fsnotify/pull/83) (thanks @guotie) +* kqueue: avoid infinite loops from symlinks cycles [#101](https://github.com/fsnotify/fsnotify/pull/101) (thanks @illicitonion) + +## [1.2.1] - 2015-10-14 + +* kqueue: don't watch named pipes [#98](https://github.com/fsnotify/fsnotify/pull/98) (thanks @evanphx) + +## [1.2.0] - 2015-02-08 + +* inotify: use epoll to wake up readEvents [#66](https://github.com/fsnotify/fsnotify/pull/66) (thanks @PieterD) +* inotify: closing watcher should now always shut down goroutine [#63](https://github.com/fsnotify/fsnotify/pull/63) (thanks @PieterD) +* kqueue: close kqueue after removing watches, fixes [#59](https://github.com/fsnotify/fsnotify/issues/59) + +## [1.1.1] - 2015-02-05 + +* inotify: Retry read on EINTR [#61](https://github.com/fsnotify/fsnotify/issues/61) (thanks @PieterD) + +## [1.1.0] - 2014-12-12 + +* kqueue: rework internals [#43](https://github.com/fsnotify/fsnotify/pull/43) + * add low-level functions + * only need to store flags on directories + * less mutexes [#13](https://github.com/fsnotify/fsnotify/issues/13) + * done can be an unbuffered channel + * remove calls to os.NewSyscallError +* More efficient string concatenation for Event.String() [#52](https://github.com/fsnotify/fsnotify/pull/52) (thanks @mdlayher) +* kqueue: fix regression in rework causing subdirectories to be watched [#48](https://github.com/fsnotify/fsnotify/issues/48) +* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51) + +## [1.0.4] - 2014-09-07 + +* kqueue: add dragonfly to the build tags. +* Rename source code files, rearrange code so exported APIs are at the top. +* Add done channel to example code. [#37](https://github.com/fsnotify/fsnotify/pull/37) (thanks @chenyukang) + +## [1.0.3] - 2014-08-19 + +* [Fix] Windows MOVED_TO now translates to Create like on BSD and Linux. [#36](https://github.com/fsnotify/fsnotify/issues/36) + +## [1.0.2] - 2014-08-17 + +* [Fix] Missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso) +* [Fix] Make ./path and path equivalent. (thanks @zhsso) + +## [1.0.0] - 2014-08-15 + +* [API] Remove AddWatch on Windows, use Add. +* Improve documentation for exported identifiers. [#30](https://github.com/fsnotify/fsnotify/issues/30) +* Minor updates based on feedback from golint. + +## dev / 2014-07-09 + +* Moved to [github.com/fsnotify/fsnotify](https://github.com/fsnotify/fsnotify). +* Use os.NewSyscallError instead of returning errno (thanks @hariharan-uno) + +## dev / 2014-07-04 + +* kqueue: fix incorrect mutex used in Close() +* Update example to demonstrate usage of Op. + +## dev / 2014-06-28 + +* [API] Don't set the Write Op for attribute notifications [#4](https://github.com/fsnotify/fsnotify/issues/4) +* Fix for String() method on Event (thanks Alex Brainman) +* Don't build on Plan 9 or Solaris (thanks @4ad) + +## dev / 2014-06-21 + +* Events channel of type Event rather than *Event. +* [internal] use syscall constants directly for inotify and kqueue. +* [internal] kqueue: rename events to kevents and fileEvent to event. 
+ +## dev / 2014-06-19 + +* Go 1.3+ required on Windows (uses syscall.ERROR_MORE_DATA internally). +* [internal] remove cookie from Event struct (unused). +* [internal] Event struct has the same definition across every OS. +* [internal] remove internal watch and removeWatch methods. + +## dev / 2014-06-12 + +* [API] Renamed Watch() to Add() and RemoveWatch() to Remove(). +* [API] Pluralized channel names: Events and Errors. +* [API] Renamed FileEvent struct to Event. +* [API] Op constants replace methods like IsCreate(). + +## dev / 2014-06-12 + +* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98) + +## dev / 2014-05-23 + +* [API] Remove current implementation of WatchFlags. + * current implementation doesn't take advantage of OS for efficiency + * provides little benefit over filtering events as they are received, but has extra bookkeeping and mutexes + * no tests for the current implementation + * not fully implemented on Windows [#93](https://github.com/howeyc/fsnotify/issues/93#issuecomment-39285195) + +## [0.9.3] - 2014-12-31 + +* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51) + +## [0.9.2] - 2014-08-17 + +* [Backport] Fix missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso) + +## [0.9.1] - 2014-06-12 + +* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98) + +## [0.9.0] - 2014-01-17 + +* IsAttrib() for events that only concern a file's metadata [#79][] (thanks @abustany) +* [Fix] kqueue: fix deadlock [#77][] (thanks @cespare) +* [NOTICE] Development has moved to `code.google.com/p/go.exp/fsnotify` in preparation for inclusion in the Go standard library. 
+ +## [0.8.12] - 2013-11-13 + +* [API] Remove FD_SET and friends from Linux adapter + +## [0.8.11] - 2013-11-02 + +* [Doc] Add Changelog [#72][] (thanks @nathany) +* [Doc] Spotlight and double modify events on macOS [#62][] (reported by @paulhammond) + +## [0.8.10] - 2013-10-19 + +* [Fix] kqueue: remove file watches when parent directory is removed [#71][] (reported by @mdwhatcott) +* [Fix] kqueue: race between Close and readEvents [#70][] (reported by @bernerdschaefer) +* [Doc] specify OS-specific limits in README (thanks @debrando) + +## [0.8.9] - 2013-09-08 + +* [Doc] Contributing (thanks @nathany) +* [Doc] update package path in example code [#63][] (thanks @paulhammond) +* [Doc] GoCI badge in README (Linux only) [#60][] +* [Doc] Cross-platform testing with Vagrant [#59][] (thanks @nathany) + +## [0.8.8] - 2013-06-17 + +* [Fix] Windows: handle `ERROR_MORE_DATA` on Windows [#49][] (thanks @jbowtie) + +## [0.8.7] - 2013-06-03 + +* [API] Make syscall flags internal +* [Fix] inotify: ignore event changes +* [Fix] race in symlink test [#45][] (reported by @srid) +* [Fix] tests on Windows +* lower case error messages + +## [0.8.6] - 2013-05-23 + +* kqueue: Use EVT_ONLY flag on Darwin +* [Doc] Update README with full example + +## [0.8.5] - 2013-05-09 + +* [Fix] inotify: allow monitoring of "broken" symlinks (thanks @tsg) + +## [0.8.4] - 2013-04-07 + +* [Fix] kqueue: watch all file events [#40][] (thanks @ChrisBuchholz) + +## [0.8.3] - 2013-03-13 + +* [Fix] inoitfy/kqueue memory leak [#36][] (reported by @nbkolchin) +* [Fix] kqueue: use fsnFlags for watching a directory [#33][] (reported by @nbkolchin) + +## [0.8.2] - 2013-02-07 + +* [Doc] add Authors +* [Fix] fix data races for map access [#29][] (thanks @fsouza) + +## [0.8.1] - 2013-01-09 + +* [Fix] Windows path separators +* [Doc] BSD License + +## [0.8.0] - 2012-11-09 + +* kqueue: directory watching improvements (thanks @vmirage) +* inotify: add `IN_MOVED_TO` [#25][] (requested by @cpisto) +* [Fix] kqueue: deleting watched directory [#24][] (reported by @jakerr) + +## [0.7.4] - 2012-10-09 + +* [Fix] inotify: fixes from https://codereview.appspot.com/5418045/ (ugorji) +* [Fix] kqueue: preserve watch flags when watching for delete [#21][] (reported by @robfig) +* [Fix] kqueue: watch the directory even if it isn't a new watch (thanks @robfig) +* [Fix] kqueue: modify after recreation of file + +## [0.7.3] - 2012-09-27 + +* [Fix] kqueue: watch with an existing folder inside the watched folder (thanks @vmirage) +* [Fix] kqueue: no longer get duplicate CREATE events + +## [0.7.2] - 2012-09-01 + +* kqueue: events for created directories + +## [0.7.1] - 2012-07-14 + +* [Fix] for renaming files + +## [0.7.0] - 2012-07-02 + +* [Feature] FSNotify flags +* [Fix] inotify: Added file name back to event path + +## [0.6.0] - 2012-06-06 + +* kqueue: watch files after directory created (thanks @tmc) + +## [0.5.1] - 2012-05-22 + +* [Fix] inotify: remove all watches before Close() + +## [0.5.0] - 2012-05-03 + +* [API] kqueue: return errors during watch instead of sending over channel +* kqueue: match symlink behavior on Linux +* inotify: add `DELETE_SELF` (requested by @taralx) +* [Fix] kqueue: handle EINTR (reported by @robfig) +* [Doc] Godoc example [#1][] (thanks @davecheney) + +## [0.4.0] - 2012-03-30 + +* Go 1 released: build with go tool +* [Feature] Windows support using winfsnotify +* Windows does not have attribute change notifications +* Roll attribute notifications into IsModify + +## [0.3.0] - 2012-02-19 + +* kqueue: add files when watch directory 
+ +## [0.2.0] - 2011-12-30 + +* update to latest Go weekly code + +## [0.1.0] - 2011-10-19 + +* kqueue: add watch on file creation to match inotify +* kqueue: create file event +* inotify: ignore `IN_IGNORED` events +* event String() +* linux: common FileEvent functions +* initial commit + +[#79]: https://github.com/howeyc/fsnotify/pull/79 +[#77]: https://github.com/howeyc/fsnotify/pull/77 +[#72]: https://github.com/howeyc/fsnotify/issues/72 +[#71]: https://github.com/howeyc/fsnotify/issues/71 +[#70]: https://github.com/howeyc/fsnotify/issues/70 +[#63]: https://github.com/howeyc/fsnotify/issues/63 +[#62]: https://github.com/howeyc/fsnotify/issues/62 +[#60]: https://github.com/howeyc/fsnotify/issues/60 +[#59]: https://github.com/howeyc/fsnotify/issues/59 +[#49]: https://github.com/howeyc/fsnotify/issues/49 +[#45]: https://github.com/howeyc/fsnotify/issues/45 +[#40]: https://github.com/howeyc/fsnotify/issues/40 +[#36]: https://github.com/howeyc/fsnotify/issues/36 +[#33]: https://github.com/howeyc/fsnotify/issues/33 +[#29]: https://github.com/howeyc/fsnotify/issues/29 +[#25]: https://github.com/howeyc/fsnotify/issues/25 +[#24]: https://github.com/howeyc/fsnotify/issues/24 +[#21]: https://github.com/howeyc/fsnotify/issues/21 diff --git a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md new file mode 100644 index 00000000..8a642563 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md @@ -0,0 +1,60 @@ +# Contributing + +## Issues + +* Request features and report bugs using the [GitHub Issue Tracker](https://github.com/fsnotify/fsnotify/issues). +* Please indicate the platform you are using fsnotify on. +* A code example to reproduce the problem is appreciated. + +## Pull Requests + +### Contributor License Agreement + +fsnotify is derived from code in the [golang.org/x/exp](https://godoc.org/golang.org/x/exp) package and it may be included [in the standard library](https://github.com/fsnotify/fsnotify/issues/1) in the future. Therefore fsnotify carries the same [LICENSE](https://github.com/fsnotify/fsnotify/blob/master/LICENSE) as Go. Contributors retain their copyright, so you need to fill out a short form before we can accept your contribution: [Google Individual Contributor License Agreement](https://developers.google.com/open-source/cla/individual). + +Please indicate that you have signed the CLA in your pull request. + +### How fsnotify is Developed + +* Development is done on feature branches. +* Tests are run on BSD, Linux, macOS and Windows. +* Pull requests are reviewed and [applied to master][am] using [hub][]. + * Maintainers may modify or squash commits rather than asking contributors to. +* To issue a new release, the maintainers will: + * Update the CHANGELOG + * Tag a version, which will become available through gopkg.in. + +### How to Fork + +For smooth sailing, always use the original import path. Installing with `go get` makes this easy. + +1. Install from GitHub (`go get -u github.com/fsnotify/fsnotify`) +2. Create your feature branch (`git checkout -b my-new-feature`) +3. Ensure everything works and the tests pass (see below) +4. Commit your changes (`git commit -am 'Add some feature'`) + +Contribute upstream: + +1. Fork fsnotify on GitHub +2. Add your remote (`git remote add fork git@github.com:mycompany/repo.git`) +3. Push to the branch (`git push fork my-new-feature`) +4. 
Create a new Pull Request on GitHub + +This workflow is [thoroughly explained by Katrina Owen](https://splice.com/blog/contributing-open-source-git-repositories-go/). + +### Testing + +fsnotify uses build tags to compile different code on Linux, BSD, macOS, and Windows. + +Before doing a pull request, please do your best to test your changes on multiple platforms, and list which platforms you were able/unable to test on. + +### Maintainers + +Help maintaining fsnotify is welcome. To be a maintainer: + +* Submit a pull request and sign the CLA as above. +* You must be able to run the test suite on Mac, Windows, Linux and BSD. + +All code changes should be internal pull requests. + +Releases are tagged using [Semantic Versioning](http://semver.org/). diff --git a/vendor/github.com/PuerkitoBio/urlesc/LICENSE b/vendor/github.com/fsnotify/fsnotify/LICENSE similarity index 95% rename from vendor/github.com/PuerkitoBio/urlesc/LICENSE rename to vendor/github.com/fsnotify/fsnotify/LICENSE index 74487567..e180c8fb 100644 --- a/vendor/github.com/PuerkitoBio/urlesc/LICENSE +++ b/vendor/github.com/fsnotify/fsnotify/LICENSE @@ -1,4 +1,5 @@ Copyright (c) 2012 The Go Authors. All rights reserved. +Copyright (c) 2012-2019 fsnotify Authors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are diff --git a/vendor/github.com/fsnotify/fsnotify/README.md b/vendor/github.com/fsnotify/fsnotify/README.md new file mode 100644 index 00000000..0731c5ef --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/README.md @@ -0,0 +1,120 @@ +# File system notifications for Go + +[![Go Reference](https://pkg.go.dev/badge/github.com/fsnotify/fsnotify.svg)](https://pkg.go.dev/github.com/fsnotify/fsnotify) [![Go Report Card](https://goreportcard.com/badge/github.com/fsnotify/fsnotify)](https://goreportcard.com/report/github.com/fsnotify/fsnotify) [![Maintainers Wanted](https://img.shields.io/badge/maintainers-wanted-red.svg)](https://github.com/fsnotify/fsnotify/issues/413) + +fsnotify utilizes [`golang.org/x/sys`](https://pkg.go.dev/golang.org/x/sys) rather than [`syscall`](https://pkg.go.dev/syscall) from the standard library. + +Cross platform: Windows, Linux, BSD and macOS. + +| Adapter | OS | Status | +| --------------------- | -------------------------------- | ------------------------------------------------------------------------------------------------------------------------------- | +| inotify | Linux 2.6.27 or later, Android\* | Supported | +| kqueue | BSD, macOS, iOS\* | Supported | +| ReadDirectoryChangesW | Windows | Supported | +| FSEvents | macOS | [Planned](https://github.com/fsnotify/fsnotify/issues/11) | +| FEN | Solaris 11 | [In Progress](https://github.com/fsnotify/fsnotify/pull/371) | +| fanotify | Linux 2.6.37+ | [Maybe](https://github.com/fsnotify/fsnotify/issues/114) | +| USN Journals | Windows | [Maybe](https://github.com/fsnotify/fsnotify/issues/53) | +| Polling | *All* | [Maybe](https://github.com/fsnotify/fsnotify/issues/9) | + +\* Android and iOS are untested. + +Please see [the documentation](https://pkg.go.dev/github.com/fsnotify/fsnotify) and consult the [FAQ](#faq) for usage information. + +## API stability + +fsnotify is a fork of [howeyc/fsnotify](https://github.com/howeyc/fsnotify) with a new API as of v1.0. The API is based on [this design document](http://goo.gl/MrYxyA). 
+ +All [releases](https://github.com/fsnotify/fsnotify/releases) are tagged based on [Semantic Versioning](http://semver.org/). + +## Usage + +```go +package main + +import ( + "log" + + "github.com/fsnotify/fsnotify" +) + +func main() { + watcher, err := fsnotify.NewWatcher() + if err != nil { + log.Fatal(err) + } + defer watcher.Close() + + done := make(chan bool) + go func() { + for { + select { + case event, ok := <-watcher.Events: + if !ok { + return + } + log.Println("event:", event) + if event.Op&fsnotify.Write == fsnotify.Write { + log.Println("modified file:", event.Name) + } + case err, ok := <-watcher.Errors: + if !ok { + return + } + log.Println("error:", err) + } + } + }() + + err = watcher.Add("/tmp/foo") + if err != nil { + log.Fatal(err) + } + <-done +} +``` + +## Contributing + +Please refer to [CONTRIBUTING][] before opening an issue or pull request. + +## FAQ + +**When a file is moved to another directory is it still being watched?** + +No (it shouldn't be, unless you are watching where it was moved to). + +**When I watch a directory, are all subdirectories watched as well?** + +No, you must add watches for any directory you want to watch (a recursive watcher is on the roadmap [#18][]). + +**Do I have to watch the Error and Event channels in a separate goroutine?** + +As of now, yes. Looking into making this single-thread friendly (see [howeyc #7][#7]) + +**Why am I receiving multiple events for the same file on OS X?** + +Spotlight indexing on OS X can result in multiple events (see [howeyc #62][#62]). A temporary workaround is to add your folder(s) to the *Spotlight Privacy settings* until we have a native FSEvents implementation (see [#11][]). + +**How many files can be watched at once?** + +There are OS-specific limits as to how many watches can be created: +* Linux: /proc/sys/fs/inotify/max_user_watches contains the limit, reaching this limit results in a "no space left on device" error. +* BSD / OSX: sysctl variables "kern.maxfiles" and "kern.maxfilesperproc", reaching these limits results in a "too many open files" error. + +**Why don't notifications work with NFS filesystems or filesystem in userspace (FUSE)?** + +fsnotify requires support from underlying OS to work. The current NFS protocol does not provide network level support for file notifications. + +[#62]: https://github.com/howeyc/fsnotify/issues/62 +[#18]: https://github.com/fsnotify/fsnotify/issues/18 +[#11]: https://github.com/fsnotify/fsnotify/issues/11 +[#7]: https://github.com/howeyc/fsnotify/issues/7 + +[contributing]: https://github.com/fsnotify/fsnotify/blob/master/CONTRIBUTING.md + +## Related Projects + +* [notify](https://github.com/rjeczalik/notify) +* [fsevents](https://github.com/fsnotify/fsevents) + diff --git a/vendor/github.com/fsnotify/fsnotify/fen.go b/vendor/github.com/fsnotify/fsnotify/fen.go new file mode 100644 index 00000000..b3ac3d8f --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/fen.go @@ -0,0 +1,38 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build solaris +// +build solaris + +package fsnotify + +import ( + "errors" +) + +// Watcher watches a set of files, delivering events to a channel. +type Watcher struct { + Events chan Event + Errors chan error +} + +// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. 
+func NewWatcher() (*Watcher, error) {
+	return nil, errors.New("FEN based watcher not yet supported for fsnotify")
+}
+
+// Close removes all watches and closes the events channel.
+func (w *Watcher) Close() error {
+	return nil
+}
+
+// Add starts watching the named file or directory (non-recursively).
+func (w *Watcher) Add(name string) error {
+	return nil
+}
+
+// Remove stops watching the named file or directory (non-recursively).
+func (w *Watcher) Remove(name string) error {
+	return nil
+}
diff --git a/vendor/github.com/fsnotify/fsnotify/fsnotify.go b/vendor/github.com/fsnotify/fsnotify/fsnotify.go
new file mode 100644
index 00000000..0f4ee52e
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/fsnotify.go
@@ -0,0 +1,69 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !plan9
+// +build !plan9
+
+// Package fsnotify provides a platform-independent interface for file system notifications.
+package fsnotify
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+)
+
+// Event represents a single file system notification.
+type Event struct {
+	Name string // Relative path to the file or directory.
+	Op   Op     // File operation that triggered the event.
+}
+
+// Op describes a set of file operations.
+type Op uint32
+
+// These are the generalized file operations that can trigger a notification.
+const (
+	Create Op = 1 << iota
+	Write
+	Remove
+	Rename
+	Chmod
+)
+
+func (op Op) String() string {
+	// Use a buffer for efficient string concatenation
+	var buffer bytes.Buffer
+
+	if op&Create == Create {
+		buffer.WriteString("|CREATE")
+	}
+	if op&Remove == Remove {
+		buffer.WriteString("|REMOVE")
+	}
+	if op&Write == Write {
+		buffer.WriteString("|WRITE")
+	}
+	if op&Rename == Rename {
+		buffer.WriteString("|RENAME")
+	}
+	if op&Chmod == Chmod {
+		buffer.WriteString("|CHMOD")
+	}
+	if buffer.Len() == 0 {
+		return ""
+	}
+	return buffer.String()[1:] // Strip leading pipe
+}
+
+// String returns a string representation of the event in the form
+// "file: REMOVE|WRITE|..."
+func (e Event) String() string {
+	return fmt.Sprintf("%q: %s", e.Name, e.Op.String())
+}
+
+// Common errors that can be reported by a watcher
+var (
+	ErrEventOverflow = errors.New("fsnotify queue overflow")
+)
diff --git a/vendor/github.com/fsnotify/fsnotify/fsnotify_unsupported.go b/vendor/github.com/fsnotify/fsnotify/fsnotify_unsupported.go
new file mode 100644
index 00000000..59688559
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/fsnotify_unsupported.go
@@ -0,0 +1,36 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !darwin && !dragonfly && !freebsd && !openbsd && !linux && !netbsd && !solaris && !windows
+// +build !darwin,!dragonfly,!freebsd,!openbsd,!linux,!netbsd,!solaris,!windows
+
+package fsnotify
+
+import (
+	"fmt"
+	"runtime"
+)
+
+// Watcher watches a set of files, delivering events to a channel.
+type Watcher struct{}
+
+// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
+func NewWatcher() (*Watcher, error) {
+	return nil, fmt.Errorf("fsnotify not supported on %s", runtime.GOOS)
+}
+
+// Close removes all watches and closes the events channel.
+func (w *Watcher) Close() error {
+	return nil
+}
+
+// Add starts watching the named file or directory (non-recursively).
+func (w *Watcher) Add(name string) error {
+	return nil
+}
+
+// Remove stops watching the named file or directory (non-recursively).
+func (w *Watcher) Remove(name string) error {
+	return nil
+}
diff --git a/vendor/github.com/fsnotify/fsnotify/inotify.go b/vendor/github.com/fsnotify/fsnotify/inotify.go
new file mode 100644
index 00000000..a6d0e0ec
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/inotify.go
@@ -0,0 +1,351 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build linux
+// +build linux
+
+package fsnotify
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+	"strings"
+	"sync"
+	"unsafe"
+
+	"golang.org/x/sys/unix"
+)
+
+// Watcher watches a set of files, delivering events to a channel.
+type Watcher struct {
+	Events   chan Event
+	Errors   chan error
+	mu       sync.Mutex // Map access
+	fd       int
+	poller   *fdPoller
+	watches  map[string]*watch // Map of inotify watches (key: path)
+	paths    map[int]string    // Map of watched paths (key: watch descriptor)
+	done     chan struct{}     // Channel for sending a "quit message" to the reader goroutine
+	doneResp chan struct{}     // Channel to respond to Close
+}
+
+// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
+func NewWatcher() (*Watcher, error) {
+	// Create inotify fd
+	fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC)
+	if fd == -1 {
+		return nil, errno
+	}
+	// Create epoll
+	poller, err := newFdPoller(fd)
+	if err != nil {
+		unix.Close(fd)
+		return nil, err
+	}
+	w := &Watcher{
+		fd:       fd,
+		poller:   poller,
+		watches:  make(map[string]*watch),
+		paths:    make(map[int]string),
+		Events:   make(chan Event),
+		Errors:   make(chan error),
+		done:     make(chan struct{}),
+		doneResp: make(chan struct{}),
+	}
+
+	go w.readEvents()
+	return w, nil
+}
+
+func (w *Watcher) isClosed() bool {
+	select {
+	case <-w.done:
+		return true
+	default:
+		return false
+	}
+}
+
+// Close removes all watches and closes the events channel.
+func (w *Watcher) Close() error {
+	if w.isClosed() {
+		return nil
+	}
+
+	// Send 'close' signal to goroutine, and set the Watcher to closed.
+	close(w.done)
+
+	// Wake up goroutine
+	w.poller.wake()
+
+	// Wait for goroutine to close
+	<-w.doneResp
+
+	return nil
+}
+
+// Add starts watching the named file or directory (non-recursively).
+func (w *Watcher) Add(name string) error {
+	name = filepath.Clean(name)
+	if w.isClosed() {
+		return errors.New("inotify instance already closed")
+	}
+
+	const agnosticEvents = unix.IN_MOVED_TO | unix.IN_MOVED_FROM |
+		unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY |
+		unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF
+
+	var flags uint32 = agnosticEvents
+
+	w.mu.Lock()
+	defer w.mu.Unlock()
+	watchEntry := w.watches[name]
+	if watchEntry != nil {
+		flags |= watchEntry.flags | unix.IN_MASK_ADD
+	}
+	wd, errno := unix.InotifyAddWatch(w.fd, name, flags)
+	if wd == -1 {
+		return errno
+	}
+
+	if watchEntry == nil {
+		w.watches[name] = &watch{wd: uint32(wd), flags: flags}
+		w.paths[wd] = name
+	} else {
+		watchEntry.wd = uint32(wd)
+		watchEntry.flags = flags
+	}
+
+	return nil
+}
+
+// Remove stops watching the named file or directory (non-recursively).
+func (w *Watcher) Remove(name string) error {
+	name = filepath.Clean(name)
+
+	// Fetch the watch.
+	w.mu.Lock()
+	defer w.mu.Unlock()
+	watch, ok := w.watches[name]
+
+	// Remove it from inotify.
+	if !ok {
+		return fmt.Errorf("can't remove non-existent inotify watch for: %s", name)
+	}
+
+	// If InotifyRmWatch doesn't return an error we successfully removed the
+	// watch; we still need to clean up our internal state to ensure it
+	// matches inotify's kernel state.
+	delete(w.paths, int(watch.wd))
+	delete(w.watches, name)
+
+	// inotify_rm_watch will return EINVAL if the file has been deleted;
+	// the inotify watch will already have been removed.
+	// watches and paths are deleted in ignoreLinux() implicitly and asynchronously
+	// by calling inotify_rm_watch() below: the readEvents() goroutine receives
+	// IN_IGNORED and cleans up. EINVAL therefore means that the wd is being
+	// rm_watch()ed or its file was removed by another thread and we have not
+	// yet received the IN_IGNORED event.
+	success, errno := unix.InotifyRmWatch(w.fd, watch.wd)
+	if success == -1 {
+		// TODO: Perhaps it's not helpful to return an error here in every case.
+		// The only two possible errors are:
+		// EBADF, which happens when w.fd is not a valid file descriptor of any kind.
+		// EINVAL, which is when fd is not an inotify descriptor or wd is not a valid watch descriptor.
+		// Watch descriptors are invalidated when they are removed explicitly or implicitly;
+		// explicitly by inotify_rm_watch, implicitly when the file they are watching is deleted.
+		return errno
+	}
+
+	return nil
+}
+
+// WatchList returns the directories and files that are being monitored.
+func (w *Watcher) WatchList() []string {
+	w.mu.Lock()
+	defer w.mu.Unlock()
+
+	entries := make([]string, 0, len(w.watches))
+	for pathname := range w.watches {
+		entries = append(entries, pathname)
+	}
+
+	return entries
+}
+
+type watch struct {
+	wd    uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall)
+	flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags)
+}
+
+// readEvents reads from the inotify file descriptor, converts the
+// received events into Event objects and sends them via the Events channel.
+func (w *Watcher) readEvents() {
+	var (
+		buf   [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events
+		n     int                                  // Number of bytes read with read()
+		errno error                                // Syscall errno
+		ok    bool                                 // For poller.wait
+	)
+
+	defer close(w.doneResp)
+	defer close(w.Errors)
+	defer close(w.Events)
+	defer unix.Close(w.fd)
+	defer w.poller.close()
+
+	for {
+		// See if we have been closed.
+		if w.isClosed() {
+			return
+		}
+
+		ok, errno = w.poller.wait()
+		if errno != nil {
+			select {
+			case w.Errors <- errno:
+			case <-w.done:
+				return
+			}
+			continue
+		}
+
+		if !ok {
+			continue
+		}
+
+		n, errno = unix.Read(w.fd, buf[:])
+		// If a signal interrupted execution, see if we've been asked to close, and try again.
+		// http://man7.org/linux/man-pages/man7/signal.7.html :
+		// "Before Linux 3.8, reads from an inotify(7) file descriptor were not restartable"
+		if errno == unix.EINTR {
+			continue
+		}
+
+		// unix.Read might have been woken up by Close. If so, we're done.
+		if w.isClosed() {
+			return
+		}
+
+		if n < unix.SizeofInotifyEvent {
+			var err error
+			if n == 0 {
+				// EOF was received, which should really never happen.
+				err = io.EOF
+			} else if n < 0 {
+				// An error occurred while reading.
+				err = errno
+			} else {
+				// Read was too short.
+				err = errors.New("notify: short read in readEvents()")
+			}
+			select {
+			case w.Errors <- err:
+			case <-w.done:
+				return
+			}
+			continue
+		}
+
+		var offset uint32
+		// We don't know how many events we just read into the buffer.
+		// While the offset points to at least one whole event...
+		for offset <= uint32(n-unix.SizeofInotifyEvent) {
+			// Point "raw" to the event in the buffer
+			raw := (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset]))
+
+			mask := uint32(raw.Mask)
+			nameLen := uint32(raw.Len)
+
+			if mask&unix.IN_Q_OVERFLOW != 0 {
+				select {
+				case w.Errors <- ErrEventOverflow:
+				case <-w.done:
+					return
+				}
+			}
+
+			// If the event happened to the watched directory or the watched file, the kernel
+			// doesn't append the filename to the event, but we would like to always fill the
+			// "Name" field with a valid filename. We retrieve the path of the watch from
+			// the "paths" map.
+			w.mu.Lock()
+			name, ok := w.paths[int(raw.Wd)]
+			// IN_DELETE_SELF occurs when the file/directory being watched is removed.
+			// This is a sign to clean up the maps, otherwise we are no longer in sync
+			// with the inotify kernel state which has already deleted the watch
+			// automatically.
+			if ok && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF {
+				delete(w.paths, int(raw.Wd))
+				delete(w.watches, name)
+			}
+			w.mu.Unlock()
+
+			if nameLen > 0 {
+				// Point "bytes" at the first byte of the filename
+				bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen]
+				// The filename is padded with NULL bytes. TrimRight() gets rid of those.
+				name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000")
+			}
+
+			event := newEvent(name, mask)
+
+			// Send the events that are not ignored on the events channel
+			if !event.ignoreLinux(mask) {
+				select {
+				case w.Events <- event:
+				case <-w.done:
+					return
+				}
+			}
+
+			// Move to the next event in the buffer
+			offset += unix.SizeofInotifyEvent + nameLen
+		}
+	}
+}
+
+// Certain types of events can be "ignored" and not sent over the Events
+// channel, such as events marked as ignored by the kernel, or MODIFY events
+// against files that do not exist.
+func (e *Event) ignoreLinux(mask uint32) bool {
+	// Ignore anything the inotify API says to ignore
+	if mask&unix.IN_IGNORED == unix.IN_IGNORED {
+		return true
+	}
+
+	// If the event is not a DELETE or RENAME, the file must exist.
+	// Otherwise the event is ignored.
+	// *Note*: this was put in place because it was seen that a MODIFY
+	// event was sent after the DELETE. This ignores that MODIFY and
+	// assumes a DELETE will come or has come if the file doesn't exist.
+	if !(e.Op&Remove == Remove || e.Op&Rename == Rename) {
+		_, statErr := os.Lstat(e.Name)
+		return os.IsNotExist(statErr)
+	}
+	return false
+}
+
+// newEvent returns a platform-independent Event based on an inotify mask.
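+//
+// For example (illustrative only, not part of the upstream file): a raw
+// unix.IN_CREATE mask maps to Create, and unix.IN_MOVED_FROM maps to Rename:
+//
+//	e := newEvent("/tmp/foo", unix.IN_CREATE)    // e.Op == Create
+//	e = newEvent("/tmp/foo", unix.IN_MOVED_FROM) // e.Op == Rename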
+func newEvent(name string, mask uint32) Event { + e := Event{Name: name} + if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO { + e.Op |= Create + } + if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF || mask&unix.IN_DELETE == unix.IN_DELETE { + e.Op |= Remove + } + if mask&unix.IN_MODIFY == unix.IN_MODIFY { + e.Op |= Write + } + if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM { + e.Op |= Rename + } + if mask&unix.IN_ATTRIB == unix.IN_ATTRIB { + e.Op |= Chmod + } + return e +} diff --git a/vendor/github.com/fsnotify/fsnotify/inotify_poller.go b/vendor/github.com/fsnotify/fsnotify/inotify_poller.go new file mode 100644 index 00000000..b572a37c --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/inotify_poller.go @@ -0,0 +1,187 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build linux +// +build linux + +package fsnotify + +import ( + "errors" + + "golang.org/x/sys/unix" +) + +type fdPoller struct { + fd int // File descriptor (as returned by the inotify_init() syscall) + epfd int // Epoll file descriptor + pipe [2]int // Pipe for waking up +} + +func emptyPoller(fd int) *fdPoller { + poller := new(fdPoller) + poller.fd = fd + poller.epfd = -1 + poller.pipe[0] = -1 + poller.pipe[1] = -1 + return poller +} + +// Create a new inotify poller. +// This creates an inotify handler, and an epoll handler. +func newFdPoller(fd int) (*fdPoller, error) { + var errno error + poller := emptyPoller(fd) + defer func() { + if errno != nil { + poller.close() + } + }() + + // Create epoll fd + poller.epfd, errno = unix.EpollCreate1(unix.EPOLL_CLOEXEC) + if poller.epfd == -1 { + return nil, errno + } + // Create pipe; pipe[0] is the read end, pipe[1] the write end. + errno = unix.Pipe2(poller.pipe[:], unix.O_NONBLOCK|unix.O_CLOEXEC) + if errno != nil { + return nil, errno + } + + // Register inotify fd with epoll + event := unix.EpollEvent{ + Fd: int32(poller.fd), + Events: unix.EPOLLIN, + } + errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.fd, &event) + if errno != nil { + return nil, errno + } + + // Register pipe fd with epoll + event = unix.EpollEvent{ + Fd: int32(poller.pipe[0]), + Events: unix.EPOLLIN, + } + errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.pipe[0], &event) + if errno != nil { + return nil, errno + } + + return poller, nil +} + +// Wait using epoll. +// Returns true if something is ready to be read, +// false if there is not. +func (poller *fdPoller) wait() (bool, error) { + // 3 possible events per fd, and 2 fds, makes a maximum of 6 events. + // I don't know whether epoll_wait returns the number of events returned, + // or the total number of events ready. + // I decided to catch both by making the buffer one larger than the maximum. + events := make([]unix.EpollEvent, 7) + for { + n, errno := unix.EpollWait(poller.epfd, events, -1) + if n == -1 { + if errno == unix.EINTR { + continue + } + return false, errno + } + if n == 0 { + // If there are no events, try again. + continue + } + if n > 6 { + // This should never happen. More events were returned than should be possible. 
+			return false, errors.New("epoll_wait returned more events than I know what to do with")
+		}
+		ready := events[:n]
+		epollhup := false
+		epollerr := false
+		epollin := false
+		for _, event := range ready {
+			if event.Fd == int32(poller.fd) {
+				if event.Events&unix.EPOLLHUP != 0 {
+					// This should not happen, but if it does, treat it as a wakeup.
+					epollhup = true
+				}
+				if event.Events&unix.EPOLLERR != 0 {
+					// If an error is waiting on the file descriptor, we should pretend
+					// something is ready to read, and let unix.Read pick up the error.
+					epollerr = true
+				}
+				if event.Events&unix.EPOLLIN != 0 {
+					// There is data to read.
+					epollin = true
+				}
+			}
+			if event.Fd == int32(poller.pipe[0]) {
+				if event.Events&unix.EPOLLHUP != 0 {
+					// Write pipe descriptor was closed, by us. This means we're closing down the
+					// watcher, and we should wake up.
+				}
+				if event.Events&unix.EPOLLERR != 0 {
+					// An error is waiting on the pipe file descriptor.
+					// This is an absolute mystery, and should never ever happen.
+					return false, errors.New("error on the pipe descriptor")
+				}
+				if event.Events&unix.EPOLLIN != 0 {
+					// This is a regular wakeup, so we have to clear the buffer.
+					err := poller.clearWake()
+					if err != nil {
+						return false, err
+					}
+				}
+			}
+		}
+
+		if epollhup || epollerr || epollin {
+			return true, nil
+		}
+		return false, nil
+	}
+}
+
+// wake wakes the poller by writing a byte to the write end of the pipe.
+func (poller *fdPoller) wake() error {
+	buf := make([]byte, 1)
+	n, errno := unix.Write(poller.pipe[1], buf)
+	if n == -1 {
+		if errno == unix.EAGAIN {
+			// Buffer is full, poller will wake.
+			return nil
+		}
+		return errno
+	}
+	return nil
+}
+
+func (poller *fdPoller) clearWake() error {
+	// You have to be woken up a LOT in order to get to 100!
+	buf := make([]byte, 100)
+	n, errno := unix.Read(poller.pipe[0], buf)
+	if n == -1 {
+		if errno == unix.EAGAIN {
+			// Buffer is empty, someone else cleared our wake.
+			return nil
+		}
+		return errno
+	}
+	return nil
+}
+
+// Close all poller file descriptors, but not the one passed to it.
+func (poller *fdPoller) close() {
+	if poller.pipe[1] != -1 {
+		unix.Close(poller.pipe[1])
+	}
+	if poller.pipe[0] != -1 {
+		unix.Close(poller.pipe[0])
+	}
+	if poller.epfd != -1 {
+		unix.Close(poller.epfd)
+	}
+}
diff --git a/vendor/github.com/fsnotify/fsnotify/kqueue.go b/vendor/github.com/fsnotify/fsnotify/kqueue.go
new file mode 100644
index 00000000..6fb8d853
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/kqueue.go
@@ -0,0 +1,535 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build freebsd || openbsd || netbsd || dragonfly || darwin
+// +build freebsd openbsd netbsd dragonfly darwin
+
+package fsnotify
+
+import (
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"sync"
+	"time"
+
+	"golang.org/x/sys/unix"
+)
+
+// Watcher watches a set of files, delivering events to a channel.
+type Watcher struct {
+	Events chan Event
+	Errors chan error
+	done   chan struct{} // Channel for sending a "quit message" to the reader goroutine
+
+	kq int // File descriptor (as returned by the kqueue() syscall).
+
+	mu              sync.Mutex        // Protects access to watcher data
+	watches         map[string]int    // Map of watched file descriptors (key: path).
+	externalWatches map[string]bool   // Map of watches added by user of the library.
+	dirFlags        map[string]uint32 // Map of watched directories to fflags used in kqueue.
+	paths           map[int]pathInfo  // Map file descriptors to path names for processing kqueue events.
+	fileExists      map[string]bool   // Keep track of whether we know this file exists (to stop duplicate create events).
+	isClosed        bool              // Set to true when Close() is first called
+}
+
+type pathInfo struct {
+	name  string
+	isDir bool
+}
+
+// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
+func NewWatcher() (*Watcher, error) {
+	kq, err := kqueue()
+	if err != nil {
+		return nil, err
+	}
+
+	w := &Watcher{
+		kq:              kq,
+		watches:         make(map[string]int),
+		dirFlags:        make(map[string]uint32),
+		paths:           make(map[int]pathInfo),
+		fileExists:      make(map[string]bool),
+		externalWatches: make(map[string]bool),
+		Events:          make(chan Event),
+		Errors:          make(chan error),
+		done:            make(chan struct{}),
+	}
+
+	go w.readEvents()
+	return w, nil
+}
+
+// Close removes all watches and closes the events channel.
+func (w *Watcher) Close() error {
+	w.mu.Lock()
+	if w.isClosed {
+		w.mu.Unlock()
+		return nil
+	}
+	w.isClosed = true
+
+	// copy paths to remove while locked
+	var pathsToRemove = make([]string, 0, len(w.watches))
+	for name := range w.watches {
+		pathsToRemove = append(pathsToRemove, name)
+	}
+	w.mu.Unlock()
+	// unlock before calling Remove, which also locks
+
+	for _, name := range pathsToRemove {
+		w.Remove(name)
+	}
+
+	// send a "quit" message to the reader goroutine
+	close(w.done)
+
+	return nil
+}
+
+// Add starts watching the named file or directory (non-recursively).
+func (w *Watcher) Add(name string) error {
+	w.mu.Lock()
+	w.externalWatches[name] = true
+	w.mu.Unlock()
+	_, err := w.addWatch(name, noteAllEvents)
+	return err
+}
+
+// Remove stops watching the named file or directory (non-recursively).
+func (w *Watcher) Remove(name string) error {
+	name = filepath.Clean(name)
+	w.mu.Lock()
+	watchfd, ok := w.watches[name]
+	w.mu.Unlock()
+	if !ok {
+		return fmt.Errorf("can't remove non-existent kevent watch for: %s", name)
+	}
+
+	const registerRemove = unix.EV_DELETE
+	if err := register(w.kq, []int{watchfd}, registerRemove, 0); err != nil {
+		return err
+	}
+
+	unix.Close(watchfd)
+
+	w.mu.Lock()
+	isDir := w.paths[watchfd].isDir
+	delete(w.watches, name)
+	delete(w.paths, watchfd)
+	delete(w.dirFlags, name)
+	w.mu.Unlock()
+
+	// Find all watched paths that are in this directory that are not external.
+	if isDir {
+		var pathsToRemove []string
+		w.mu.Lock()
+		for _, path := range w.paths {
+			wdir, _ := filepath.Split(path.name)
+			if filepath.Clean(wdir) == name {
+				if !w.externalWatches[path.name] {
+					pathsToRemove = append(pathsToRemove, path.name)
+				}
+			}
+		}
+		w.mu.Unlock()
+		for _, name := range pathsToRemove {
+			// Since these are internal, not much sense in propagating error
+			// to the user, as that will just confuse them with an error about
+			// a path they did not explicitly watch themselves.
+			w.Remove(name)
+		}
+	}
+
+	return nil
+}
+
+// WatchList returns the directories and files that are being monitored.
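+//
+// A minimal usage sketch (illustrative only, not part of the upstream file;
+// assumes the caller imports log):
+//
+//	for _, p := range w.WatchList() {
+//		log.Println("watching:", p)
+//	}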
+func (w *Watcher) WatchList() []string { + w.mu.Lock() + defer w.mu.Unlock() + + entries := make([]string, 0, len(w.watches)) + for pathname := range w.watches { + entries = append(entries, pathname) + } + + return entries +} + +// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE) +const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME + +// keventWaitTime to block on each read from kevent +var keventWaitTime = durationToTimespec(100 * time.Millisecond) + +// addWatch adds name to the watched file set. +// The flags are interpreted as described in kevent(2). +// Returns the real path to the file which was added, if any, which may be different from the one passed in the case of symlinks. +func (w *Watcher) addWatch(name string, flags uint32) (string, error) { + var isDir bool + // Make ./name and name equivalent + name = filepath.Clean(name) + + w.mu.Lock() + if w.isClosed { + w.mu.Unlock() + return "", errors.New("kevent instance already closed") + } + watchfd, alreadyWatching := w.watches[name] + // We already have a watch, but we can still override flags. + if alreadyWatching { + isDir = w.paths[watchfd].isDir + } + w.mu.Unlock() + + if !alreadyWatching { + fi, err := os.Lstat(name) + if err != nil { + return "", err + } + + // Don't watch sockets. + if fi.Mode()&os.ModeSocket == os.ModeSocket { + return "", nil + } + + // Don't watch named pipes. + if fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe { + return "", nil + } + + // Follow Symlinks + // Unfortunately, Linux can add bogus symlinks to watch list without + // issue, and Windows can't do symlinks period (AFAIK). To maintain + // consistency, we will act like everything is fine. There will simply + // be no file events for broken symlinks. + // Hence the returns of nil on errors. + if fi.Mode()&os.ModeSymlink == os.ModeSymlink { + name, err = filepath.EvalSymlinks(name) + if err != nil { + return "", nil + } + + w.mu.Lock() + _, alreadyWatching = w.watches[name] + w.mu.Unlock() + + if alreadyWatching { + return name, nil + } + + fi, err = os.Lstat(name) + if err != nil { + return "", nil + } + } + + watchfd, err = unix.Open(name, openMode, 0700) + if watchfd == -1 { + return "", err + } + + isDir = fi.IsDir() + } + + const registerAdd = unix.EV_ADD | unix.EV_CLEAR | unix.EV_ENABLE + if err := register(w.kq, []int{watchfd}, registerAdd, flags); err != nil { + unix.Close(watchfd) + return "", err + } + + if !alreadyWatching { + w.mu.Lock() + w.watches[name] = watchfd + w.paths[watchfd] = pathInfo{name: name, isDir: isDir} + w.mu.Unlock() + } + + if isDir { + // Watch the directory if it has not been watched before, + // or if it was watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles) + w.mu.Lock() + + watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE && + (!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE) + // Store flags so this watch can be updated later + w.dirFlags[name] = flags + w.mu.Unlock() + + if watchDir { + if err := w.watchDirectoryFiles(name); err != nil { + return "", err + } + } + } + return name, nil +} + +// readEvents reads from kqueue and converts the received kevents into +// Event values that it sends down the Events channel. 
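+//
+// readEvents runs in its own goroutine (started by NewWatcher); a caller
+// drains the channels, e.g. (sketch, not part of the upstream file; assumes
+// the caller imports log):
+//
+//	go func() {
+//		for {
+//			select {
+//			case ev := <-w.Events:
+//				log.Println("event:", ev)
+//			case err := <-w.Errors:
+//				log.Println("error:", err)
+//			}
+//		}
+//	}()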
+func (w *Watcher) readEvents() {
+	eventBuffer := make([]unix.Kevent_t, 10)
+
+loop:
+	for {
+		// See if there is a message on the "done" channel
+		select {
+		case <-w.done:
+			break loop
+		default:
+		}
+
+		// Get new events
+		kevents, err := read(w.kq, eventBuffer, &keventWaitTime)
+		// EINTR is okay, the syscall was interrupted before timeout expired.
+		if err != nil && err != unix.EINTR {
+			select {
+			case w.Errors <- err:
+			case <-w.done:
+				break loop
+			}
+			continue
+		}
+
+		// Flush the events we received to the Events channel
+		for len(kevents) > 0 {
+			kevent := &kevents[0]
+			watchfd := int(kevent.Ident)
+			mask := uint32(kevent.Fflags)
+			w.mu.Lock()
+			path := w.paths[watchfd]
+			w.mu.Unlock()
+			event := newEvent(path.name, mask)
+
+			if path.isDir && !(event.Op&Remove == Remove) {
+				// Double check to make sure the directory exists. This can
+				// happen when we do an rm -fr on a recursively watched folder
+				// and receive a modification event first, but the folder has
+				// been deleted and we later receive the delete event.
+				if _, err := os.Lstat(event.Name); os.IsNotExist(err) {
+					// mark it as a delete event
+					event.Op |= Remove
+				}
+			}
+
+			if event.Op&Rename == Rename || event.Op&Remove == Remove {
+				w.Remove(event.Name)
+				w.mu.Lock()
+				delete(w.fileExists, event.Name)
+				w.mu.Unlock()
+			}
+
+			if path.isDir && event.Op&Write == Write && !(event.Op&Remove == Remove) {
+				w.sendDirectoryChangeEvents(event.Name)
+			} else {
+				// Send the event on the Events channel.
+				select {
+				case w.Events <- event:
+				case <-w.done:
+					break loop
+				}
+			}
+
+			if event.Op&Remove == Remove {
+				// Look for a file that may have overwritten this.
+				// For example, mv f1 f2 will delete f2, then create f2.
+				if path.isDir {
+					fileDir := filepath.Clean(event.Name)
+					w.mu.Lock()
+					_, found := w.watches[fileDir]
+					w.mu.Unlock()
+					if found {
+						// make sure the directory exists before we watch for
+						// changes. When we do a recursive watch and perform
+						// rm -fr, the parent directory might have gone
+						// missing; ignore the missing directory and let the
+						// upcoming delete event remove the watch from the
+						// parent directory.
+						if _, err := os.Lstat(fileDir); err == nil {
+							w.sendDirectoryChangeEvents(fileDir)
+						}
+					}
+				} else {
+					filePath := filepath.Clean(event.Name)
+					if fileInfo, err := os.Lstat(filePath); err == nil {
+						w.sendFileCreatedEventIfNew(filePath, fileInfo)
+					}
+				}
+			}
+
+			// Move to next event
+			kevents = kevents[1:]
+		}
+	}
+
+	// cleanup
+	err := unix.Close(w.kq)
+	if err != nil {
+		// The only way the previous loop breaks is if w.done was closed, so
+		// we need to send to w.Errors asynchronously.
+		select {
+		case w.Errors <- err:
+		default:
+		}
+	}
+	close(w.Events)
+	close(w.Errors)
+}
+
+// newEvent returns a platform-independent Event based on kqueue Fflags.
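+//
+// For example (illustrative only, not part of the upstream file): a combined
+// kqueue mask sets multiple Op bits:
+//
+//	e := newEvent("/tmp/foo", unix.NOTE_WRITE|unix.NOTE_ATTRIB)
+//	// e.Op == Write|Chmod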
+func newEvent(name string, mask uint32) Event {
+	e := Event{Name: name}
+	if mask&unix.NOTE_DELETE == unix.NOTE_DELETE {
+		e.Op |= Remove
+	}
+	if mask&unix.NOTE_WRITE == unix.NOTE_WRITE {
+		e.Op |= Write
+	}
+	if mask&unix.NOTE_RENAME == unix.NOTE_RENAME {
+		e.Op |= Rename
+	}
+	if mask&unix.NOTE_ATTRIB == unix.NOTE_ATTRIB {
+		e.Op |= Chmod
+	}
+	return e
+}
+
+func newCreateEvent(name string) Event {
+	return Event{Name: name, Op: Create}
+}
+
+// watchDirectoryFiles adds watches on all files in a directory, to mimic
+// inotify when adding a watch on a directory.
+func (w *Watcher) watchDirectoryFiles(dirPath string) error {
+	// Get all files
+	files, err := ioutil.ReadDir(dirPath)
+	if err != nil {
+		return err
+	}
+
+	for _, fileInfo := range files {
+		filePath := filepath.Join(dirPath, fileInfo.Name())
+		filePath, err = w.internalWatch(filePath, fileInfo)
+		if err != nil {
+			return err
+		}
+
+		w.mu.Lock()
+		w.fileExists[filePath] = true
+		w.mu.Unlock()
+	}
+
+	return nil
+}
+
+// sendDirectoryChangeEvents searches the directory for newly created files
+// and sends them over the event channel. This functionality is to have
+// the BSD version of fsnotify match Linux inotify which provides a
+// create event for files created in a watched directory.
+func (w *Watcher) sendDirectoryChangeEvents(dirPath string) {
+	// Get all files
+	files, err := ioutil.ReadDir(dirPath)
+	if err != nil {
+		select {
+		case w.Errors <- err:
+		case <-w.done:
+			return
+		}
+	}
+
+	// Search for new files
+	for _, fileInfo := range files {
+		filePath := filepath.Join(dirPath, fileInfo.Name())
+		err := w.sendFileCreatedEventIfNew(filePath, fileInfo)
+
+		if err != nil {
+			return
+		}
+	}
+}
+
+// sendFileCreatedEventIfNew sends a create event if the file isn't already being tracked.
+func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInfo) (err error) {
+	w.mu.Lock()
+	_, doesExist := w.fileExists[filePath]
+	w.mu.Unlock()
+	if !doesExist {
+		// Send create event
+		select {
+		case w.Events <- newCreateEvent(filePath):
+		case <-w.done:
+			return
+		}
+	}
+
+	// like watchDirectoryFiles (but without doing another ReadDir)
+	filePath, err = w.internalWatch(filePath, fileInfo)
+	if err != nil {
+		return err
+	}
+
+	w.mu.Lock()
+	w.fileExists[filePath] = true
+	w.mu.Unlock()
+
+	return nil
+}
+
+func (w *Watcher) internalWatch(name string, fileInfo os.FileInfo) (string, error) {
+	if fileInfo.IsDir() {
+		// mimic Linux providing delete events for subdirectories
+		// but preserve the flags used if currently watching subdirectory
+		w.mu.Lock()
+		flags := w.dirFlags[name]
+		w.mu.Unlock()
+
+		flags |= unix.NOTE_DELETE | unix.NOTE_RENAME
+		return w.addWatch(name, flags)
+	}
+
+	// watch file to mimic Linux inotify
+	return w.addWatch(name, noteAllEvents)
+}
+
+// kqueue creates a new kernel event queue and returns a descriptor.
+func kqueue() (kq int, err error) {
+	kq, err = unix.Kqueue()
+	if kq == -1 {
+		return kq, err
+	}
+	return kq, nil
+}
+
+// register events with the queue
+func register(kq int, fds []int, flags int, fflags uint32) error {
+	changes := make([]unix.Kevent_t, len(fds))
+
+	for i, fd := range fds {
+		// SetKevent converts int to the platform-specific types:
+		unix.SetKevent(&changes[i], fd, unix.EVFILT_VNODE, flags)
+		changes[i].Fflags = fflags
+	}
+
+	// register the events
+	success, err := unix.Kevent(kq, changes, nil, nil)
+	if success == -1 {
+		return err
+	}
+	return nil
+}
+
+// read retrieves pending events, or waits until an event occurs.
+// A timeout of nil blocks indefinitely, while 0 polls the queue.
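+//
+// For instance (sketch, not part of the upstream file), a non-blocking poll
+// uses a zero timespec:
+//
+//	zero := unix.NsecToTimespec(0)
+//	evs, err := read(kq, make([]unix.Kevent_t, 10), &zero)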
+func read(kq int, events []unix.Kevent_t, timeout *unix.Timespec) ([]unix.Kevent_t, error) { + n, err := unix.Kevent(kq, nil, events, timeout) + if err != nil { + return nil, err + } + return events[0:n], nil +} + +// durationToTimespec prepares a timeout value +func durationToTimespec(d time.Duration) unix.Timespec { + return unix.NsecToTimespec(d.Nanoseconds()) +} diff --git a/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go b/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go new file mode 100644 index 00000000..36cc3845 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go @@ -0,0 +1,12 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build freebsd || openbsd || netbsd || dragonfly +// +build freebsd openbsd netbsd dragonfly + +package fsnotify + +import "golang.org/x/sys/unix" + +const openMode = unix.O_NONBLOCK | unix.O_RDONLY | unix.O_CLOEXEC diff --git a/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go b/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go new file mode 100644 index 00000000..98cd8476 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go @@ -0,0 +1,13 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build darwin +// +build darwin + +package fsnotify + +import "golang.org/x/sys/unix" + +// note: this constant is not defined on BSD +const openMode = unix.O_EVTONLY | unix.O_CLOEXEC diff --git a/vendor/github.com/fsnotify/fsnotify/windows.go b/vendor/github.com/fsnotify/fsnotify/windows.go new file mode 100644 index 00000000..02ce7deb --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/windows.go @@ -0,0 +1,586 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build windows +// +build windows + +package fsnotify + +import ( + "errors" + "fmt" + "os" + "path/filepath" + "reflect" + "runtime" + "sync" + "syscall" + "unsafe" +) + +// Watcher watches a set of files, delivering events to a channel. +type Watcher struct { + Events chan Event + Errors chan error + isClosed bool // Set to true when Close() is first called + mu sync.Mutex // Map access + port syscall.Handle // Handle to completion port + watches watchMap // Map of watches (key: i-number) + input chan *input // Inputs to the reader are sent on this channel + quit chan chan<- error +} + +// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. +func NewWatcher() (*Watcher, error) { + port, e := syscall.CreateIoCompletionPort(syscall.InvalidHandle, 0, 0, 0) + if e != nil { + return nil, os.NewSyscallError("CreateIoCompletionPort", e) + } + w := &Watcher{ + port: port, + watches: make(watchMap), + input: make(chan *input, 1), + Events: make(chan Event, 50), + Errors: make(chan error), + quit: make(chan chan<- error, 1), + } + go w.readEvents() + return w, nil +} + +// Close removes all watches and closes the events channel. +func (w *Watcher) Close() error { + if w.isClosed { + return nil + } + w.isClosed = true + + // Send "quit" message to the reader goroutine + ch := make(chan error) + w.quit <- ch + if err := w.wakeupReader(); err != nil { + return err + } + return <-ch +} + +// Add starts watching the named file or directory (non-recursively). 
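+//
+// Internally the request is marshalled to the reader goroutine over w.input
+// and the completion port is woken; from the caller's side it is simply
+// (sketch, not part of the upstream file; assumes the caller imports log):
+//
+//	if err := w.Add(`C:\some\dir`); err != nil {
+//		log.Fatal(err)
+//	}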
+func (w *Watcher) Add(name string) error {
+	if w.isClosed {
+		return errors.New("watcher already closed")
+	}
+	in := &input{
+		op:    opAddWatch,
+		path:  filepath.Clean(name),
+		flags: sysFSALLEVENTS,
+		reply: make(chan error),
+	}
+	w.input <- in
+	if err := w.wakeupReader(); err != nil {
+		return err
+	}
+	return <-in.reply
+}
+
+// Remove stops watching the named file or directory (non-recursively).
+func (w *Watcher) Remove(name string) error {
+	in := &input{
+		op:    opRemoveWatch,
+		path:  filepath.Clean(name),
+		reply: make(chan error),
+	}
+	w.input <- in
+	if err := w.wakeupReader(); err != nil {
+		return err
+	}
+	return <-in.reply
+}
+
+// WatchList returns the directories and files that are being monitored.
+func (w *Watcher) WatchList() []string {
+	w.mu.Lock()
+	defer w.mu.Unlock()
+
+	entries := make([]string, 0, len(w.watches))
+	for _, entry := range w.watches {
+		for _, watchEntry := range entry {
+			entries = append(entries, watchEntry.path)
+		}
+	}
+
+	return entries
+}
+
+const (
+	// Options for AddWatch
+	sysFSONESHOT = 0x80000000
+	sysFSONLYDIR = 0x1000000
+
+	// Events
+	sysFSACCESS     = 0x1
+	sysFSALLEVENTS  = 0xfff
+	sysFSATTRIB     = 0x4
+	sysFSCLOSE      = 0x18
+	sysFSCREATE     = 0x100
+	sysFSDELETE     = 0x200
+	sysFSDELETESELF = 0x400
+	sysFSMODIFY     = 0x2
+	sysFSMOVE       = 0xc0
+	sysFSMOVEDFROM  = 0x40
+	sysFSMOVEDTO    = 0x80
+	sysFSMOVESELF   = 0x800
+
+	// Special events
+	sysFSIGNORED   = 0x8000
+	sysFSQOVERFLOW = 0x4000
+)
+
+func newEvent(name string, mask uint32) Event {
+	e := Event{Name: name}
+	if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO {
+		e.Op |= Create
+	}
+	if mask&sysFSDELETE == sysFSDELETE || mask&sysFSDELETESELF == sysFSDELETESELF {
+		e.Op |= Remove
+	}
+	if mask&sysFSMODIFY == sysFSMODIFY {
+		e.Op |= Write
+	}
+	if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM {
+		e.Op |= Rename
+	}
+	if mask&sysFSATTRIB == sysFSATTRIB {
+		e.Op |= Chmod
+	}
+	return e
+}
+
+const (
+	opAddWatch = iota
+	opRemoveWatch
+)
+
+const (
+	provisional uint64 = 1 << (32 + iota)
+)
+
+type input struct {
+	op    int
+	path  string
+	flags uint32
+	reply chan error
+}
+
+type inode struct {
+	handle syscall.Handle
+	volume uint32
+	index  uint64
+}
+
+type watch struct {
+	ov     syscall.Overlapped
+	ino    *inode            // i-number
+	path   string            // Directory path
+	mask   uint64            // Directory itself is being watched with these notify flags
+	names  map[string]uint64 // Map of names being watched and their notify flags
+	rename string            // Remembers the old name while renaming a file
+	buf    [4096]byte
+}
+
+type indexMap map[uint64]*watch
+type watchMap map[uint32]indexMap
+
+func (w *Watcher) wakeupReader() error {
+	e := syscall.PostQueuedCompletionStatus(w.port, 0, 0, nil)
+	if e != nil {
+		return os.NewSyscallError("PostQueuedCompletionStatus", e)
+	}
+	return nil
+}
+
+func getDir(pathname string) (dir string, err error) {
+	attr, e := syscall.GetFileAttributes(syscall.StringToUTF16Ptr(pathname))
+	if e != nil {
+		return "", os.NewSyscallError("GetFileAttributes", e)
+	}
+	if attr&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 {
+		dir = pathname
+	} else {
+		dir, _ = filepath.Split(pathname)
+		dir = filepath.Clean(dir)
+	}
+	return
+}
+
+func getIno(path string) (ino *inode, err error) {
+	h, e := syscall.CreateFile(syscall.StringToUTF16Ptr(path),
+		syscall.FILE_LIST_DIRECTORY,
+		syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE,
+		nil, syscall.OPEN_EXISTING,
syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OVERLAPPED, 0) + if e != nil { + return nil, os.NewSyscallError("CreateFile", e) + } + var fi syscall.ByHandleFileInformation + if e = syscall.GetFileInformationByHandle(h, &fi); e != nil { + syscall.CloseHandle(h) + return nil, os.NewSyscallError("GetFileInformationByHandle", e) + } + ino = &inode{ + handle: h, + volume: fi.VolumeSerialNumber, + index: uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow), + } + return ino, nil +} + +// Must run within the I/O thread. +func (m watchMap) get(ino *inode) *watch { + if i := m[ino.volume]; i != nil { + return i[ino.index] + } + return nil +} + +// Must run within the I/O thread. +func (m watchMap) set(ino *inode, watch *watch) { + i := m[ino.volume] + if i == nil { + i = make(indexMap) + m[ino.volume] = i + } + i[ino.index] = watch +} + +// Must run within the I/O thread. +func (w *Watcher) addWatch(pathname string, flags uint64) error { + dir, err := getDir(pathname) + if err != nil { + return err + } + if flags&sysFSONLYDIR != 0 && pathname != dir { + return nil + } + ino, err := getIno(dir) + if err != nil { + return err + } + w.mu.Lock() + watchEntry := w.watches.get(ino) + w.mu.Unlock() + if watchEntry == nil { + if _, e := syscall.CreateIoCompletionPort(ino.handle, w.port, 0, 0); e != nil { + syscall.CloseHandle(ino.handle) + return os.NewSyscallError("CreateIoCompletionPort", e) + } + watchEntry = &watch{ + ino: ino, + path: dir, + names: make(map[string]uint64), + } + w.mu.Lock() + w.watches.set(ino, watchEntry) + w.mu.Unlock() + flags |= provisional + } else { + syscall.CloseHandle(ino.handle) + } + if pathname == dir { + watchEntry.mask |= flags + } else { + watchEntry.names[filepath.Base(pathname)] |= flags + } + if err = w.startRead(watchEntry); err != nil { + return err + } + if pathname == dir { + watchEntry.mask &= ^provisional + } else { + watchEntry.names[filepath.Base(pathname)] &= ^provisional + } + return nil +} + +// Must run within the I/O thread. +func (w *Watcher) remWatch(pathname string) error { + dir, err := getDir(pathname) + if err != nil { + return err + } + ino, err := getIno(dir) + if err != nil { + return err + } + w.mu.Lock() + watch := w.watches.get(ino) + w.mu.Unlock() + if watch == nil { + return fmt.Errorf("can't remove non-existent watch for: %s", pathname) + } + if pathname == dir { + w.sendEvent(watch.path, watch.mask&sysFSIGNORED) + watch.mask = 0 + } else { + name := filepath.Base(pathname) + w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED) + delete(watch.names, name) + } + return w.startRead(watch) +} + +// Must run within the I/O thread. +func (w *Watcher) deleteWatch(watch *watch) { + for name, mask := range watch.names { + if mask&provisional == 0 { + w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED) + } + delete(watch.names, name) + } + if watch.mask != 0 { + if watch.mask&provisional == 0 { + w.sendEvent(watch.path, watch.mask&sysFSIGNORED) + } + watch.mask = 0 + } +} + +// Must run within the I/O thread. 
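+// ("Must run within the I/O thread" means: these helpers are only called
+// from readEvents, which locks its OS thread; other goroutines reach them
+// indirectly via the w.input channel and wakeupReader, never directly.)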
+func (w *Watcher) startRead(watch *watch) error { + if e := syscall.CancelIo(watch.ino.handle); e != nil { + w.Errors <- os.NewSyscallError("CancelIo", e) + w.deleteWatch(watch) + } + mask := toWindowsFlags(watch.mask) + for _, m := range watch.names { + mask |= toWindowsFlags(m) + } + if mask == 0 { + if e := syscall.CloseHandle(watch.ino.handle); e != nil { + w.Errors <- os.NewSyscallError("CloseHandle", e) + } + w.mu.Lock() + delete(w.watches[watch.ino.volume], watch.ino.index) + w.mu.Unlock() + return nil + } + e := syscall.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0], + uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0) + if e != nil { + err := os.NewSyscallError("ReadDirectoryChanges", e) + if e == syscall.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 { + // Watched directory was probably removed + if w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) { + if watch.mask&sysFSONESHOT != 0 { + watch.mask = 0 + } + } + err = nil + } + w.deleteWatch(watch) + w.startRead(watch) + return err + } + return nil +} + +// readEvents reads from the I/O completion port, converts the +// received events into Event objects and sends them via the Events channel. +// Entry point to the I/O thread. +func (w *Watcher) readEvents() { + var ( + n, key uint32 + ov *syscall.Overlapped + ) + runtime.LockOSThread() + + for { + e := syscall.GetQueuedCompletionStatus(w.port, &n, &key, &ov, syscall.INFINITE) + watch := (*watch)(unsafe.Pointer(ov)) + + if watch == nil { + select { + case ch := <-w.quit: + w.mu.Lock() + var indexes []indexMap + for _, index := range w.watches { + indexes = append(indexes, index) + } + w.mu.Unlock() + for _, index := range indexes { + for _, watch := range index { + w.deleteWatch(watch) + w.startRead(watch) + } + } + var err error + if e := syscall.CloseHandle(w.port); e != nil { + err = os.NewSyscallError("CloseHandle", e) + } + close(w.Events) + close(w.Errors) + ch <- err + return + case in := <-w.input: + switch in.op { + case opAddWatch: + in.reply <- w.addWatch(in.path, uint64(in.flags)) + case opRemoveWatch: + in.reply <- w.remWatch(in.path) + } + default: + } + continue + } + + switch e { + case syscall.ERROR_MORE_DATA: + if watch == nil { + w.Errors <- errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer") + } else { + // The i/o succeeded but the buffer is full. + // In theory we should be building up a full packet. + // In practice we can get away with just carrying on. 
+				n = uint32(unsafe.Sizeof(watch.buf))
+			}
+		case syscall.ERROR_ACCESS_DENIED:
+			// Watched directory was probably removed
+			w.sendEvent(watch.path, watch.mask&sysFSDELETESELF)
+			w.deleteWatch(watch)
+			w.startRead(watch)
+			continue
+		case syscall.ERROR_OPERATION_ABORTED:
+			// CancelIo was called on this handle
+			continue
+		default:
+			w.Errors <- os.NewSyscallError("GetQueuedCompletionStatus", e)
+			continue
+		case nil:
+		}
+
+		var offset uint32
+		for {
+			if n == 0 {
+				w.Events <- newEvent("", sysFSQOVERFLOW)
+				w.Errors <- errors.New("short read in readEvents()")
+				break
+			}
+
+			// Point "raw" to the event in the buffer
+			raw := (*syscall.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset]))
+			// TODO: Consider using unsafe.Slice that is available from go1.17
+			// https://stackoverflow.com/questions/51187973/how-to-create-an-array-or-a-slice-from-an-array-unsafe-pointer-in-golang
+			// instead of using a fixed syscall.MAX_PATH buf, we create a buf that is the size of the path name
+			size := int(raw.FileNameLength / 2)
+			var buf []uint16
+			sh := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+			sh.Data = uintptr(unsafe.Pointer(&raw.FileName))
+			sh.Len = size
+			sh.Cap = size
+			name := syscall.UTF16ToString(buf)
+			fullname := filepath.Join(watch.path, name)
+
+			var mask uint64
+			switch raw.Action {
+			case syscall.FILE_ACTION_REMOVED:
+				mask = sysFSDELETESELF
+			case syscall.FILE_ACTION_MODIFIED:
+				mask = sysFSMODIFY
+			case syscall.FILE_ACTION_RENAMED_OLD_NAME:
+				watch.rename = name
+			case syscall.FILE_ACTION_RENAMED_NEW_NAME:
+				if watch.names[watch.rename] != 0 {
+					watch.names[name] |= watch.names[watch.rename]
+					delete(watch.names, watch.rename)
+					mask = sysFSMOVESELF
+				}
+			}
+
+			sendNameEvent := func() {
+				if w.sendEvent(fullname, watch.names[name]&mask) {
+					if watch.names[name]&sysFSONESHOT != 0 {
+						delete(watch.names, name)
+					}
+				}
+			}
+			if raw.Action != syscall.FILE_ACTION_RENAMED_NEW_NAME {
+				sendNameEvent()
+			}
+			if raw.Action == syscall.FILE_ACTION_REMOVED {
+				w.sendEvent(fullname, watch.names[name]&sysFSIGNORED)
+				delete(watch.names, name)
+			}
+			if w.sendEvent(fullname, watch.mask&toFSnotifyFlags(raw.Action)) {
+				if watch.mask&sysFSONESHOT != 0 {
+					watch.mask = 0
+				}
+			}
+			if raw.Action == syscall.FILE_ACTION_RENAMED_NEW_NAME {
+				fullname = filepath.Join(watch.path, watch.rename)
+				sendNameEvent()
+			}
+
+			// Move to the next event in the buffer
+			if raw.NextEntryOffset == 0 {
+				break
+			}
+			offset += raw.NextEntryOffset
+
+			// Error!
+ if offset >= n { + w.Errors <- errors.New("Windows system assumed buffer larger than it is, events have likely been missed.") + break + } + } + + if err := w.startRead(watch); err != nil { + w.Errors <- err + } + } +} + +func (w *Watcher) sendEvent(name string, mask uint64) bool { + if mask == 0 { + return false + } + event := newEvent(name, uint32(mask)) + select { + case ch := <-w.quit: + w.quit <- ch + case w.Events <- event: + } + return true +} + +func toWindowsFlags(mask uint64) uint32 { + var m uint32 + if mask&sysFSACCESS != 0 { + m |= syscall.FILE_NOTIFY_CHANGE_LAST_ACCESS + } + if mask&sysFSMODIFY != 0 { + m |= syscall.FILE_NOTIFY_CHANGE_LAST_WRITE + } + if mask&sysFSATTRIB != 0 { + m |= syscall.FILE_NOTIFY_CHANGE_ATTRIBUTES + } + if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 { + m |= syscall.FILE_NOTIFY_CHANGE_FILE_NAME | syscall.FILE_NOTIFY_CHANGE_DIR_NAME + } + return m +} + +func toFSnotifyFlags(action uint32) uint64 { + switch action { + case syscall.FILE_ACTION_ADDED: + return sysFSCREATE + case syscall.FILE_ACTION_REMOVED: + return sysFSDELETE + case syscall.FILE_ACTION_MODIFIED: + return sysFSMODIFY + case syscall.FILE_ACTION_RENAMED_OLD_NAME: + return sysFSMOVEDFROM + case syscall.FILE_ACTION_RENAMED_NEW_NAME: + return sysFSMOVEDTO + } + return 0 +} diff --git a/vendor/github.com/go-openapi/jsonreference/internal/normalize_url.go b/vendor/github.com/go-openapi/jsonreference/internal/normalize_url.go new file mode 100644 index 00000000..8956c308 --- /dev/null +++ b/vendor/github.com/go-openapi/jsonreference/internal/normalize_url.go @@ -0,0 +1,63 @@ +package internal + +import ( + "net/url" + "regexp" + "strings" +) + +const ( + defaultHttpPort = ":80" + defaultHttpsPort = ":443" +) + +// Regular expressions used by the normalizations +var rxPort = regexp.MustCompile(`(:\d+)/?$`) +var rxDupSlashes = regexp.MustCompile(`/{2,}`) + +// NormalizeURL will normalize the specified URL +// This was added to replace a previous call to the no longer maintained purell library: +// The call that was used looked like the following: +// url.Parse(purell.NormalizeURL(parsed, purell.FlagsSafe|purell.FlagRemoveDuplicateSlashes)) +// +// To explain all that was included in the call above, purell.FlagsSafe was really just the following: +// - FlagLowercaseScheme +// - FlagLowercaseHost +// - FlagRemoveDefaultPort +// - FlagRemoveDuplicateSlashes (and this was mixed in with the |) +func NormalizeURL(u *url.URL) { + lowercaseScheme(u) + lowercaseHost(u) + removeDefaultPort(u) + removeDuplicateSlashes(u) +} + +func lowercaseScheme(u *url.URL) { + if len(u.Scheme) > 0 { + u.Scheme = strings.ToLower(u.Scheme) + } +} + +func lowercaseHost(u *url.URL) { + if len(u.Host) > 0 { + u.Host = strings.ToLower(u.Host) + } +} + +func removeDefaultPort(u *url.URL) { + if len(u.Host) > 0 { + scheme := strings.ToLower(u.Scheme) + u.Host = rxPort.ReplaceAllStringFunc(u.Host, func(val string) string { + if (scheme == "http" && val == defaultHttpPort) || (scheme == "https" && val == defaultHttpsPort) { + return "" + } + return val + }) + } +} + +func removeDuplicateSlashes(u *url.URL) { + if len(u.Path) > 0 { + u.Path = rxDupSlashes.ReplaceAllString(u.Path, "/") + } +} diff --git a/vendor/github.com/go-openapi/jsonreference/reference.go b/vendor/github.com/go-openapi/jsonreference/reference.go index 3bc0a6e2..cfdef03e 100644 --- a/vendor/github.com/go-openapi/jsonreference/reference.go +++ b/vendor/github.com/go-openapi/jsonreference/reference.go @@ -30,8 +30,8 @@ import ( "net/url" "strings" - 
"github.com/PuerkitoBio/purell" "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/jsonreference/internal" ) const ( @@ -114,7 +114,9 @@ func (r *Ref) parse(jsonReferenceString string) error { return err } - r.referenceURL, _ = url.Parse(purell.NormalizeURL(parsed, purell.FlagsSafe|purell.FlagRemoveDuplicateSlashes)) + internal.NormalizeURL(parsed) + + r.referenceURL = parsed refURL := r.referenceURL if refURL.Scheme != "" && refURL.Host != "" { diff --git a/vendor/github.com/go-openapi/swag/.gitattributes b/vendor/github.com/go-openapi/swag/.gitattributes new file mode 100644 index 00000000..49ad5276 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/.gitattributes @@ -0,0 +1,2 @@ +# gofmt always uses LF, whereas Git uses CRLF on Windows. +*.go text eol=lf diff --git a/vendor/github.com/go-openapi/swag/.golangci.yml b/vendor/github.com/go-openapi/swag/.golangci.yml index 813c47aa..2a4a71f3 100644 --- a/vendor/github.com/go-openapi/swag/.golangci.yml +++ b/vendor/github.com/go-openapi/swag/.golangci.yml @@ -37,3 +37,14 @@ linters: - gci - gocognit - paralleltest + - thelper + - ifshort + - gomoddirectives + - cyclop + - forcetypeassert + - ireturn + - tagliatelle + - varnamelen + - goimports + - tenv + - golint diff --git a/vendor/github.com/go-openapi/swag/.travis.yml b/vendor/github.com/go-openapi/swag/.travis.yml deleted file mode 100644 index fc25a887..00000000 --- a/vendor/github.com/go-openapi/swag/.travis.yml +++ /dev/null @@ -1,37 +0,0 @@ -after_success: -- bash <(curl -s https://codecov.io/bash) -go: -- 1.14.x -- 1.x -arch: -- amd64 -jobs: - include: - # include arch ppc, but only for latest go version - skip testing for race - - go: 1.x - arch: ppc64le - install: ~ - script: - - go test -v - - #- go: 1.x - # arch: arm - # install: ~ - # script: - # - go test -v - - # include linting job, but only for latest go version and amd64 arch - - go: 1.x - arch: amd64 - install: - go get github.com/golangci/golangci-lint/cmd/golangci-lint - script: - - golangci-lint run --new-from-rev master -install: -- GO111MODULE=off go get -u gotest.tools/gotestsum -language: go -notifications: - slack: - secure: QUWvCkBBK09GF7YtEvHHVt70JOkdlNBG0nIKu/5qc4/nW5HP8I2w0SEf/XR2je0eED1Qe3L/AfMCWwrEj+IUZc3l4v+ju8X8R3Lomhme0Eb0jd1MTMCuPcBT47YCj0M7RON7vXtbFfm1hFJ/jLe5+9FXz0hpXsR24PJc5ZIi/ogNwkaPqG4BmndzecpSh0vc2FJPZUD9LT0I09REY/vXR0oQAalLkW0asGD5taHZTUZq/kBpsNxaAFrLM23i4mUcf33M5fjLpvx5LRICrX/57XpBrDh2TooBU6Qj3CgoY0uPRYUmSNxbVx1czNzl2JtEpb5yjoxfVPQeg0BvQM00G8LJINISR+ohrjhkZmAqchDupAX+yFrxTtORa78CtnIL6z/aTNlgwwVD8kvL/1pFA/JWYmKDmz93mV/+6wubGzNSQCstzjkFA4/iZEKewKUoRIAi/fxyscP6L/rCpmY/4llZZvrnyTqVbt6URWpopUpH4rwYqreXAtJxJsfBJIeSmUIiDIOMGkCTvyTEW3fWGmGoqWtSHLoaWDyAIGb7azb+KvfpWtEcoPFWfSWU+LGee0A/YsUhBl7ADB9A0CJEuR8q4BPpKpfLwPKSiKSAXL7zDkyjExyhtgqbSl2jS+rKIHOZNL8JkCcTP2MKMVd563C5rC5FMKqu3S9m2b6380E= -script: -- gotestsum -f short-verbose -- -race -coverprofile=coverage.txt -covermode=atomic ./... diff --git a/vendor/github.com/go-openapi/swag/file.go b/vendor/github.com/go-openapi/swag/file.go new file mode 100644 index 00000000..16accc55 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/file.go @@ -0,0 +1,33 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package swag + +import "mime/multipart" + +// File represents an uploaded file. +type File struct { + Data multipart.File + Header *multipart.FileHeader +} + +// Read bytes from the file +func (f *File) Read(p []byte) (n int, err error) { + return f.Data.Read(p) +} + +// Close the file +func (f *File) Close() error { + return f.Data.Close() +} diff --git a/vendor/github.com/go-openapi/swag/post_go18.go b/vendor/github.com/go-openapi/swag/post_go18.go index c2e686d3..f5228b82 100644 --- a/vendor/github.com/go-openapi/swag/post_go18.go +++ b/vendor/github.com/go-openapi/swag/post_go18.go @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build go1.8 // +build go1.8 package swag diff --git a/vendor/github.com/go-openapi/swag/post_go19.go b/vendor/github.com/go-openapi/swag/post_go19.go index eb2f2d8b..7c7da9c0 100644 --- a/vendor/github.com/go-openapi/swag/post_go19.go +++ b/vendor/github.com/go-openapi/swag/post_go19.go @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build go1.9 // +build go1.9 package swag diff --git a/vendor/github.com/go-openapi/swag/pre_go18.go b/vendor/github.com/go-openapi/swag/pre_go18.go index 6607f339..2757d9b9 100644 --- a/vendor/github.com/go-openapi/swag/pre_go18.go +++ b/vendor/github.com/go-openapi/swag/pre_go18.go @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !go1.8 // +build !go1.8 package swag diff --git a/vendor/github.com/go-openapi/swag/pre_go19.go b/vendor/github.com/go-openapi/swag/pre_go19.go index 4bae187d..0565db37 100644 --- a/vendor/github.com/go-openapi/swag/pre_go19.go +++ b/vendor/github.com/go-openapi/swag/pre_go19.go @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !go1.9 // +build !go1.9 package swag diff --git a/vendor/github.com/go-stack/stack/README.md b/vendor/github.com/go-stack/stack/README.md deleted file mode 100644 index f11cccca..00000000 --- a/vendor/github.com/go-stack/stack/README.md +++ /dev/null @@ -1,38 +0,0 @@ -[![GoDoc](https://godoc.org/github.com/go-stack/stack?status.svg)](https://godoc.org/github.com/go-stack/stack) -[![Go Report Card](https://goreportcard.com/badge/go-stack/stack)](https://goreportcard.com/report/go-stack/stack) -[![TravisCI](https://travis-ci.org/go-stack/stack.svg?branch=master)](https://travis-ci.org/go-stack/stack) -[![Coverage Status](https://coveralls.io/repos/github/go-stack/stack/badge.svg?branch=master)](https://coveralls.io/github/go-stack/stack?branch=master) - -# stack - -Package stack implements utilities to capture, manipulate, and format call -stacks. It provides a simpler API than package runtime. - -The implementation takes care of the minutia and special cases of interpreting -the program counter (pc) values returned by runtime.Callers. 
- -## Versioning - -Package stack publishes releases via [semver](http://semver.org/) compatible Git -tags prefixed with a single 'v'. The master branch always contains the latest -release. The develop branch contains unreleased commits. - -## Formatting - -Package stack's types implement fmt.Formatter, which provides a simple and -flexible way to declaratively configure formatting when used with logging or -error tracking packages. - -```go -func DoTheThing() { - c := stack.Caller(0) - log.Print(c) // "source.go:10" - log.Printf("%+v", c) // "pkg/path/source.go:10" - log.Printf("%n", c) // "DoTheThing" - - s := stack.Trace().TrimRuntime() - log.Print(s) // "[source.go:15 caller.go:42 main.go:14]" -} -``` - -See the docs for all of the supported formatting options. diff --git a/vendor/github.com/go-stack/stack/stack.go b/vendor/github.com/go-stack/stack/stack.go deleted file mode 100644 index ac3b93b1..00000000 --- a/vendor/github.com/go-stack/stack/stack.go +++ /dev/null @@ -1,400 +0,0 @@ -// +build go1.7 - -// Package stack implements utilities to capture, manipulate, and format call -// stacks. It provides a simpler API than package runtime. -// -// The implementation takes care of the minutia and special cases of -// interpreting the program counter (pc) values returned by runtime.Callers. -// -// Package stack's types implement fmt.Formatter, which provides a simple and -// flexible way to declaratively configure formatting when used with logging -// or error tracking packages. -package stack - -import ( - "bytes" - "errors" - "fmt" - "io" - "runtime" - "strconv" - "strings" -) - -// Call records a single function invocation from a goroutine stack. -type Call struct { - frame runtime.Frame -} - -// Caller returns a Call from the stack of the current goroutine. The argument -// skip is the number of stack frames to ascend, with 0 identifying the -// calling function. -func Caller(skip int) Call { - // As of Go 1.9 we need room for up to three PC entries. - // - // 0. An entry for the stack frame prior to the target to check for - // special handling needed if that prior entry is runtime.sigpanic. - // 1. A possible second entry to hold metadata about skipped inlined - // functions. If inline functions were not skipped the target frame - // PC will be here. - // 2. A third entry for the target frame PC when the second entry - // is used for skipped inline functions. - var pcs [3]uintptr - n := runtime.Callers(skip+1, pcs[:]) - frames := runtime.CallersFrames(pcs[:n]) - frame, _ := frames.Next() - frame, _ = frames.Next() - - return Call{ - frame: frame, - } -} - -// String implements fmt.Stinger. It is equivalent to fmt.Sprintf("%v", c). -func (c Call) String() string { - return fmt.Sprint(c) -} - -// MarshalText implements encoding.TextMarshaler. It formats the Call the same -// as fmt.Sprintf("%v", c). -func (c Call) MarshalText() ([]byte, error) { - if c.frame == (runtime.Frame{}) { - return nil, ErrNoFunc - } - - buf := bytes.Buffer{} - fmt.Fprint(&buf, c) - return buf.Bytes(), nil -} - -// ErrNoFunc means that the Call has a nil *runtime.Func. The most likely -// cause is a Call with the zero value. -var ErrNoFunc = errors.New("no call stack information") - -// Format implements fmt.Formatter with support for the following verbs. -// -// %s source file -// %d line number -// %n function name -// %k last segment of the package path -// %v equivalent to %s:%d -// -// It accepts the '+' and '#' flags for most of the verbs as follows. 
-// -// %+s path of source file relative to the compile time GOPATH, -// or the module path joined to the path of source file relative -// to module root -// %#s full path of source file -// %+n import path qualified function name -// %+k full package path -// %+v equivalent to %+s:%d -// %#v equivalent to %#s:%d -func (c Call) Format(s fmt.State, verb rune) { - if c.frame == (runtime.Frame{}) { - fmt.Fprintf(s, "%%!%c(NOFUNC)", verb) - return - } - - switch verb { - case 's', 'v': - file := c.frame.File - switch { - case s.Flag('#'): - // done - case s.Flag('+'): - file = pkgFilePath(&c.frame) - default: - const sep = "/" - if i := strings.LastIndex(file, sep); i != -1 { - file = file[i+len(sep):] - } - } - io.WriteString(s, file) - if verb == 'v' { - buf := [7]byte{':'} - s.Write(strconv.AppendInt(buf[:1], int64(c.frame.Line), 10)) - } - - case 'd': - buf := [6]byte{} - s.Write(strconv.AppendInt(buf[:0], int64(c.frame.Line), 10)) - - case 'k': - name := c.frame.Function - const pathSep = "/" - start, end := 0, len(name) - if i := strings.LastIndex(name, pathSep); i != -1 { - start = i + len(pathSep) - } - const pkgSep = "." - if i := strings.Index(name[start:], pkgSep); i != -1 { - end = start + i - } - if s.Flag('+') { - start = 0 - } - io.WriteString(s, name[start:end]) - - case 'n': - name := c.frame.Function - if !s.Flag('+') { - const pathSep = "/" - if i := strings.LastIndex(name, pathSep); i != -1 { - name = name[i+len(pathSep):] - } - const pkgSep = "." - if i := strings.Index(name, pkgSep); i != -1 { - name = name[i+len(pkgSep):] - } - } - io.WriteString(s, name) - } -} - -// Frame returns the call frame infomation for the Call. -func (c Call) Frame() runtime.Frame { - return c.frame -} - -// PC returns the program counter for this call frame; multiple frames may -// have the same PC value. -// -// Deprecated: Use Call.Frame instead. -func (c Call) PC() uintptr { - return c.frame.PC -} - -// CallStack records a sequence of function invocations from a goroutine -// stack. -type CallStack []Call - -// String implements fmt.Stinger. It is equivalent to fmt.Sprintf("%v", cs). -func (cs CallStack) String() string { - return fmt.Sprint(cs) -} - -var ( - openBracketBytes = []byte("[") - closeBracketBytes = []byte("]") - spaceBytes = []byte(" ") -) - -// MarshalText implements encoding.TextMarshaler. It formats the CallStack the -// same as fmt.Sprintf("%v", cs). -func (cs CallStack) MarshalText() ([]byte, error) { - buf := bytes.Buffer{} - buf.Write(openBracketBytes) - for i, pc := range cs { - if i > 0 { - buf.Write(spaceBytes) - } - fmt.Fprint(&buf, pc) - } - buf.Write(closeBracketBytes) - return buf.Bytes(), nil -} - -// Format implements fmt.Formatter by printing the CallStack as square brackets -// ([, ]) surrounding a space separated list of Calls each formatted with the -// supplied verb and options. -func (cs CallStack) Format(s fmt.State, verb rune) { - s.Write(openBracketBytes) - for i, pc := range cs { - if i > 0 { - s.Write(spaceBytes) - } - pc.Format(s, verb) - } - s.Write(closeBracketBytes) -} - -// Trace returns a CallStack for the current goroutine with element 0 -// identifying the calling function. -func Trace() CallStack { - var pcs [512]uintptr - n := runtime.Callers(1, pcs[:]) - - frames := runtime.CallersFrames(pcs[:n]) - cs := make(CallStack, 0, n) - - // Skip extra frame retrieved just to make sure the runtime.sigpanic - // special case is handled. 
- frame, more := frames.Next() - - for more { - frame, more = frames.Next() - cs = append(cs, Call{frame: frame}) - } - - return cs -} - -// TrimBelow returns a slice of the CallStack with all entries below c -// removed. -func (cs CallStack) TrimBelow(c Call) CallStack { - for len(cs) > 0 && cs[0] != c { - cs = cs[1:] - } - return cs -} - -// TrimAbove returns a slice of the CallStack with all entries above c -// removed. -func (cs CallStack) TrimAbove(c Call) CallStack { - for len(cs) > 0 && cs[len(cs)-1] != c { - cs = cs[:len(cs)-1] - } - return cs -} - -// pkgIndex returns the index that results in file[index:] being the path of -// file relative to the compile time GOPATH, and file[:index] being the -// $GOPATH/src/ portion of file. funcName must be the name of a function in -// file as returned by runtime.Func.Name. -func pkgIndex(file, funcName string) int { - // As of Go 1.6.2 there is no direct way to know the compile time GOPATH - // at runtime, but we can infer the number of path segments in the GOPATH. - // We note that runtime.Func.Name() returns the function name qualified by - // the import path, which does not include the GOPATH. Thus we can trim - // segments from the beginning of the file path until the number of path - // separators remaining is one more than the number of path separators in - // the function name. For example, given: - // - // GOPATH /home/user - // file /home/user/src/pkg/sub/file.go - // fn.Name() pkg/sub.Type.Method - // - // We want to produce: - // - // file[:idx] == /home/user/src/ - // file[idx:] == pkg/sub/file.go - // - // From this we can easily see that fn.Name() has one less path separator - // than our desired result for file[idx:]. We count separators from the - // end of the file path until it finds two more than in the function name - // and then move one character forward to preserve the initial path - // segment without a leading separator. - const sep = "/" - i := len(file) - for n := strings.Count(funcName, sep) + 2; n > 0; n-- { - i = strings.LastIndex(file[:i], sep) - if i == -1 { - i = -len(sep) - break - } - } - // get back to 0 or trim the leading separator - return i + len(sep) -} - -// pkgFilePath returns the frame's filepath relative to the compile-time GOPATH, -// or its module path joined to its path relative to the module root. -// -// As of Go 1.11 there is no direct way to know the compile time GOPATH or -// module paths at runtime, but we can piece together the desired information -// from available information. We note that runtime.Frame.Function contains the -// function name qualified by the package path, which includes the module path -// but not the GOPATH. We can extract the package path from that and append the -// last segments of the file path to arrive at the desired package qualified -// file path. For example, given: -// -// GOPATH /home/user -// import path pkg/sub -// frame.File /home/user/src/pkg/sub/file.go -// frame.Function pkg/sub.Type.Method -// Desired return pkg/sub/file.go -// -// It appears that we simply need to trim ".Type.Method" from frame.Function and -// append "/" + path.Base(file). -// -// But there are other wrinkles. Although it is idiomatic to do so, the internal -// name of a package is not required to match the last segment of its import -// path. In addition, the introduction of modules in Go 1.11 allows working -// without a GOPATH. 
So we also must make these work right: -// -// GOPATH /home/user -// import path pkg/go-sub -// package name sub -// frame.File /home/user/src/pkg/go-sub/file.go -// frame.Function pkg/sub.Type.Method -// Desired return pkg/go-sub/file.go -// -// Module path pkg/v2 -// import path pkg/v2/go-sub -// package name sub -// frame.File /home/user/cloned-pkg/go-sub/file.go -// frame.Function pkg/v2/sub.Type.Method -// Desired return pkg/v2/go-sub/file.go -// -// We can handle all of these situations by using the package path extracted -// from frame.Function up to, but not including, the last segment as the prefix -// and the last two segments of frame.File as the suffix of the returned path. -// This preserves the existing behavior when working in a GOPATH without modules -// and a semantically equivalent behavior when used in module aware project. -func pkgFilePath(frame *runtime.Frame) string { - pre := pkgPrefix(frame.Function) - post := pathSuffix(frame.File) - if pre == "" { - return post - } - return pre + "/" + post -} - -// pkgPrefix returns the import path of the function's package with the final -// segment removed. -func pkgPrefix(funcName string) string { - const pathSep = "/" - end := strings.LastIndex(funcName, pathSep) - if end == -1 { - return "" - } - return funcName[:end] -} - -// pathSuffix returns the last two segments of path. -func pathSuffix(path string) string { - const pathSep = "/" - lastSep := strings.LastIndex(path, pathSep) - if lastSep == -1 { - return path - } - return path[strings.LastIndex(path[:lastSep], pathSep)+1:] -} - -var runtimePath string - -func init() { - var pcs [3]uintptr - runtime.Callers(0, pcs[:]) - frames := runtime.CallersFrames(pcs[:]) - frame, _ := frames.Next() - file := frame.File - - idx := pkgIndex(frame.File, frame.Function) - - runtimePath = file[:idx] - if runtime.GOOS == "windows" { - runtimePath = strings.ToLower(runtimePath) - } -} - -func inGoroot(c Call) bool { - file := c.frame.File - if len(file) == 0 || file[0] == '?' { - return true - } - if runtime.GOOS == "windows" { - file = strings.ToLower(file) - } - return strings.HasPrefix(file, runtimePath) || strings.HasSuffix(file, "/_testmain.go") -} - -// TrimRuntime returns a slice of the CallStack with the topmost entries from -// the go runtime removed. It considers any calls originating from unknown -// files, files under GOROOT, or _testmain.go as part of the runtime. 
-func (cs CallStack) TrimRuntime() CallStack { - for len(cs) > 0 && inGoroot(cs[len(cs)-1]) { - cs = cs[:len(cs)-1] - } - return cs -} diff --git a/vendor/github.com/goccy/go-json/CHANGELOG.md b/vendor/github.com/goccy/go-json/CHANGELOG.md index 6e4b93f1..20d13e97 100644 --- a/vendor/github.com/goccy/go-json/CHANGELOG.md +++ b/vendor/github.com/goccy/go-json/CHANGELOG.md @@ -1,3 +1,18 @@ +# v0.9.10 - 2022/07/15 + +### Fix bugs + +* Fix boundary exception of type caching ( #382 ) + +# v0.9.9 - 2022/07/15 + +### Fix bugs + +* Fix encoding of directed interface with typed nil ( #377 ) +* Fix embedded primitive type encoding using alias ( #378 ) +* Fix slice/array type encoding with types implementing MarshalJSON ( #379 ) +* Fix unicode decoding when the expected buffer state is not met after reading ( #380 ) + # v0.9.8 - 2022/06/30 ### Fix bugs diff --git a/vendor/github.com/goccy/go-json/internal/decoder/compile.go b/vendor/github.com/goccy/go-json/internal/decoder/compile.go index f13b43b8..fab64376 100644 --- a/vendor/github.com/goccy/go-json/internal/decoder/compile.go +++ b/vendor/github.com/goccy/go-json/internal/decoder/compile.go @@ -24,7 +24,7 @@ func init() { if typeAddr == nil { typeAddr = &runtime.TypeAddr{} } - cachedDecoder = make([]Decoder, typeAddr.AddrRange>>typeAddr.AddrShift) + cachedDecoder = make([]Decoder, typeAddr.AddrRange>>typeAddr.AddrShift+1) } func loadDecoderMap() map[uintptr]Decoder { @@ -393,6 +393,15 @@ func compileStruct(typ *runtime.Type, structName, fieldName string, structTypeTo } allFields = append(allFields, fieldSet) } + } else { + fieldSet := &structFieldSet{ + dec: pdec, + offset: field.Offset, + isTaggedKey: tag.IsTaggedKey, + key: field.Name, + keyLen: int64(len(field.Name)), + } + allFields = append(allFields, fieldSet) } } else { fieldSet := &structFieldSet{ diff --git a/vendor/github.com/goccy/go-json/internal/decoder/string.go b/vendor/github.com/goccy/go-json/internal/decoder/string.go index cef6688b..871ab3d7 100644 --- a/vendor/github.com/goccy/go-json/internal/decoder/string.go +++ b/vendor/github.com/goccy/go-json/internal/decoder/string.go @@ -95,24 +95,30 @@ func unicodeToRune(code []byte) rune { return r } +func readAtLeast(s *Stream, n int64, p *unsafe.Pointer) bool { + for s.cursor+n >= s.length { + if !s.read() { + return false + } + *p = s.bufptr() + } + return true +} + func decodeUnicodeRune(s *Stream, p unsafe.Pointer) (rune, int64, unsafe.Pointer, error) { const defaultOffset = 5 const surrogateOffset = 11 - if s.cursor+defaultOffset >= s.length { - if !s.read() { - return rune(0), 0, nil, errors.ErrInvalidCharacter(s.char(), "escaped string", s.totalOffset()) - } - p = s.bufptr() + if !readAtLeast(s, defaultOffset, &p) { + return rune(0), 0, nil, errors.ErrInvalidCharacter(s.char(), "escaped string", s.totalOffset()) } r := unicodeToRune(s.buf[s.cursor+1 : s.cursor+defaultOffset]) if utf16.IsSurrogate(r) { - if s.cursor+surrogateOffset >= s.length { - s.read() - p = s.bufptr() + if !readAtLeast(s, surrogateOffset, &p) { + return unicode.ReplacementChar, defaultOffset, p, nil } - if s.cursor+surrogateOffset >= s.length || s.buf[s.cursor+defaultOffset] != '\\' || s.buf[s.cursor+defaultOffset+1] != 'u' { + if s.buf[s.cursor+defaultOffset] != '\\' || s.buf[s.cursor+defaultOffset+1] != 'u' { return unicode.ReplacementChar, defaultOffset, p, nil } r2 := unicodeToRune(s.buf[s.cursor+defaultOffset+2 : s.cursor+surrogateOffset]) diff --git a/vendor/github.com/goccy/go-json/internal/encoder/compiler.go 
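The go-json CHANGELOG entries above describe the hunks that follow; the typed-nil fix (#377) is the subtlest. A struct whose only field is a pointer is stored directly in an interface value, so the zero struct makes the interface's data word nil even though the value itself is valid; the encoder/vm hunks below stop short-circuiting that case to a bare null. A minimal sketch of the behavior, with hypothetical type and field names:

```go
package main

import (
	"fmt"

	"github.com/goccy/go-json"
)

// S is pointer-shaped (a single pointer field), so it is stored directly in
// an interface; the zero S therefore has a nil interface data word.
type S struct {
	P *int `json:"p"`
}

func main() {
	var v interface{} = S{}
	b, err := json.Marshal(v)
	if err != nil {
		panic(err)
	}
	// With the fix this prints {"p":null}, matching encoding/json;
	// previously the nil data word was encoded as a bare null.
	fmt.Println(string(b))
}
```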
b/vendor/github.com/goccy/go-json/internal/encoder/compiler.go index de7323c8..bf5e0f94 100644 --- a/vendor/github.com/goccy/go-json/internal/encoder/compiler.go +++ b/vendor/github.com/goccy/go-json/internal/encoder/compiler.go @@ -31,7 +31,7 @@ func init() { if typeAddr == nil { typeAddr = &runtime.TypeAddr{} } - cachedOpcodeSets = make([]*OpcodeSet, typeAddr.AddrRange>>typeAddr.AddrShift) + cachedOpcodeSets = make([]*OpcodeSet, typeAddr.AddrRange>>typeAddr.AddrShift+1) } func loadOpcodeMap() map[uintptr]*OpcodeSet { @@ -487,7 +487,10 @@ func (c *Compiler) listElemCode(typ *runtime.Type) (Code, error) { case typ.Kind() == reflect.Map: return c.ptrCode(runtime.PtrTo(typ)) default: - code, err := c.typeToCodeWithPtr(typ, false) + // isPtr was originally used to indicate whether the type of top level is pointer. + // However, since the slice/array element is a specification that can get the pointer address, explicitly set isPtr to true. + // See here for related issues: https://github.com/goccy/go-json/issues/370 + code, err := c.typeToCodeWithPtr(typ, true) if err != nil { return nil, err } diff --git a/vendor/github.com/goccy/go-json/internal/encoder/vm/vm.go b/vendor/github.com/goccy/go-json/internal/encoder/vm/vm.go index 91b11e1f..645d20f9 100644 --- a/vendor/github.com/goccy/go-json/internal/encoder/vm/vm.go +++ b/vendor/github.com/goccy/go-json/internal/encoder/vm/vm.go @@ -3,6 +3,7 @@ package vm import ( "math" + "reflect" "sort" "unsafe" @@ -194,9 +195,12 @@ func Run(ctx *encoder.RuntimeContext, b []byte, codeSet *encoder.OpcodeSet) ([]b typ = iface.typ } if ifacePtr == nil { - b = appendNullComma(ctx, b) - code = code.Next - break + isDirectedNil := typ != nil && typ.Kind() == reflect.Struct && !runtime.IfaceIndir(typ) + if !isDirectedNil { + b = appendNullComma(ctx, b) + code = code.Next + break + } } ctx.KeepRefs = append(ctx.KeepRefs, up) ifaceCodeSet, err := encoder.CompileToGetCodeSet(ctx, uintptr(unsafe.Pointer(typ))) diff --git a/vendor/github.com/goccy/go-json/internal/encoder/vm_color/vm.go b/vendor/github.com/goccy/go-json/internal/encoder/vm_color/vm.go index 5c6c52c3..a63e83e5 100644 --- a/vendor/github.com/goccy/go-json/internal/encoder/vm_color/vm.go +++ b/vendor/github.com/goccy/go-json/internal/encoder/vm_color/vm.go @@ -3,6 +3,7 @@ package vm_color import ( "math" + "reflect" "sort" "unsafe" @@ -194,9 +195,12 @@ func Run(ctx *encoder.RuntimeContext, b []byte, codeSet *encoder.OpcodeSet) ([]b typ = iface.typ } if ifacePtr == nil { - b = appendNullComma(ctx, b) - code = code.Next - break + isDirectedNil := typ != nil && typ.Kind() == reflect.Struct && !runtime.IfaceIndir(typ) + if !isDirectedNil { + b = appendNullComma(ctx, b) + code = code.Next + break + } } ctx.KeepRefs = append(ctx.KeepRefs, up) ifaceCodeSet, err := encoder.CompileToGetCodeSet(ctx, uintptr(unsafe.Pointer(typ))) diff --git a/vendor/github.com/goccy/go-json/internal/encoder/vm_color_indent/vm.go b/vendor/github.com/goccy/go-json/internal/encoder/vm_color_indent/vm.go index 42dc11ca..3b4e22e5 100644 --- a/vendor/github.com/goccy/go-json/internal/encoder/vm_color_indent/vm.go +++ b/vendor/github.com/goccy/go-json/internal/encoder/vm_color_indent/vm.go @@ -3,6 +3,7 @@ package vm_color_indent import ( "math" + "reflect" "sort" "unsafe" @@ -194,9 +195,12 @@ func Run(ctx *encoder.RuntimeContext, b []byte, codeSet *encoder.OpcodeSet) ([]b typ = iface.typ } if ifacePtr == nil { - b = appendNullComma(ctx, b) - code = code.Next - break + isDirectedNil := typ != nil && typ.Kind() == reflect.Struct && 
!runtime.IfaceIndir(typ) + if !isDirectedNil { + b = appendNullComma(ctx, b) + code = code.Next + break + } } ctx.KeepRefs = append(ctx.KeepRefs, up) ifaceCodeSet, err := encoder.CompileToGetCodeSet(ctx, uintptr(unsafe.Pointer(typ))) diff --git a/vendor/github.com/goccy/go-json/internal/encoder/vm_indent/vm.go b/vendor/github.com/goccy/go-json/internal/encoder/vm_indent/vm.go index dfe0cc64..836c5c8a 100644 --- a/vendor/github.com/goccy/go-json/internal/encoder/vm_indent/vm.go +++ b/vendor/github.com/goccy/go-json/internal/encoder/vm_indent/vm.go @@ -3,6 +3,7 @@ package vm_indent import ( "math" + "reflect" "sort" "unsafe" @@ -194,9 +195,12 @@ func Run(ctx *encoder.RuntimeContext, b []byte, codeSet *encoder.OpcodeSet) ([]b typ = iface.typ } if ifacePtr == nil { - b = appendNullComma(ctx, b) - code = code.Next - break + isDirectedNil := typ != nil && typ.Kind() == reflect.Struct && !runtime.IfaceIndir(typ) + if !isDirectedNil { + b = appendNullComma(ctx, b) + code = code.Next + break + } } ctx.KeepRefs = append(ctx.KeepRefs, up) ifaceCodeSet, err := encoder.CompileToGetCodeSet(ctx, uintptr(unsafe.Pointer(typ))) diff --git a/vendor/github.com/golang-sql/sqlexp/messages.go b/vendor/github.com/golang-sql/sqlexp/messages.go index b809de6b..ae57b3cd 100644 --- a/vendor/github.com/golang-sql/sqlexp/messages.go +++ b/vendor/github.com/golang-sql/sqlexp/messages.go @@ -2,6 +2,7 @@ package sqlexp import ( "context" + "fmt" ) // RawMessage is returned from RowsMessage. @@ -71,7 +72,7 @@ type ( MsgLastInsertID struct{ Value interface{} } // MsgNotice is raised from the SQL text and is only informational. - MsgNotice struct{ Message string } + MsgNotice struct{ Message fmt.Stringer } // MsgError returns SQL errors from the database system (not transport // or other system level errors). diff --git a/vendor/github.com/google/gnostic/jsonschema/display.go b/vendor/github.com/google/gnostic/jsonschema/display.go index 028a760a..8677ed49 100644 --- a/vendor/github.com/google/gnostic/jsonschema/display.go +++ b/vendor/github.com/google/gnostic/jsonschema/display.go @@ -46,8 +46,23 @@ func (schema *Schema) describeSchema(indent string) string { if schema.Schema != nil { result += indent + "$schema: " + *(schema.Schema) + "\n" } + if schema.ReadOnly != nil && *schema.ReadOnly { + result += indent + fmt.Sprintf("readOnly: %+v\n", *(schema.ReadOnly)) + } + if schema.WriteOnly != nil && *schema.WriteOnly { + result += indent + fmt.Sprintf("writeOnly: %+v\n", *(schema.WriteOnly)) + } if schema.ID != nil { - result += indent + "id: " + *(schema.ID) + "\n" + switch strings.TrimSuffix(*schema.Schema, "#") { + case "http://json-schema.org/draft-04/schema#": + fallthrough + case "#": + fallthrough + case "": + result += indent + "id: " + *(schema.ID) + "\n" + default: + result += indent + "$id: " + *(schema.ID) + "\n" + } } if schema.MultipleOf != nil { result += indent + fmt.Sprintf("multipleOf: %+v\n", *(schema.MultipleOf)) diff --git a/vendor/github.com/google/gnostic/jsonschema/models.go b/vendor/github.com/google/gnostic/jsonschema/models.go index 4781bdc5..0d877249 100644 --- a/vendor/github.com/google/gnostic/jsonschema/models.go +++ b/vendor/github.com/google/gnostic/jsonschema/models.go @@ -23,9 +23,11 @@ import "gopkg.in/yaml.v3" // All fields are pointers and are nil if the associated values // are not specified. type Schema struct { - Schema *string // $schema - ID *string // id keyword used for $ref resolution scope - Ref *string // $ref, i.e. 
JSON Pointers + Schema *string // $schema + ID *string // id keyword used for $ref resolution scope + Ref *string // $ref, i.e. JSON Pointers + ReadOnly *bool + WriteOnly *bool // http://json-schema.org/latest/json-schema-validation.html // 5.1. Validation keywords for numeric instances (number and integer) diff --git a/vendor/github.com/google/gnostic/jsonschema/reader.go b/vendor/github.com/google/gnostic/jsonschema/reader.go index b8583d46..a909a341 100644 --- a/vendor/github.com/google/gnostic/jsonschema/reader.go +++ b/vendor/github.com/google/gnostic/jsonschema/reader.go @@ -165,7 +165,6 @@ func NewSchemaFromObject(jsonData *yaml.Node) *Schema { default: fmt.Printf("schemaValue: unexpected node %+v\n", jsonData) - return nil } return nil diff --git a/vendor/github.com/google/gnostic/jsonschema/writer.go b/vendor/github.com/google/gnostic/jsonschema/writer.go index 340dc5f9..15b1f905 100644 --- a/vendor/github.com/google/gnostic/jsonschema/writer.go +++ b/vendor/github.com/google/gnostic/jsonschema/writer.go @@ -16,6 +16,7 @@ package jsonschema import ( "fmt" + "strings" "gopkg.in/yaml.v3" ) @@ -33,7 +34,11 @@ func renderMappingNode(node *yaml.Node, indent string) (result string) { value := node.Content[i+1] switch value.Kind { case yaml.ScalarNode: - result += "\"" + value.Value + "\"" + if value.Tag == "!!bool" { + result += value.Value + } else { + result += "\"" + value.Value + "\"" + } case yaml.MappingNode: result += renderMappingNode(value, innerIndent) case yaml.SequenceNode: @@ -58,7 +63,11 @@ func renderSequenceNode(node *yaml.Node, indent string) (result string) { item := node.Content[i] switch item.Kind { case yaml.ScalarNode: - result += innerIndent + "\"" + item.Value + "\"" + if item.Tag == "!!bool" { + result += innerIndent + item.Value + } else { + result += innerIndent + "\"" + item.Value + "\"" + } case yaml.MappingNode: result += innerIndent + renderMappingNode(item, innerIndent) + "" default: @@ -260,11 +269,26 @@ func (schema *Schema) nodeValue() *yaml.Node { content = appendPair(content, "title", nodeForString(*schema.Title)) } if schema.ID != nil { - content = appendPair(content, "id", nodeForString(*schema.ID)) + switch strings.TrimSuffix(*schema.Schema, "#") { + case "http://json-schema.org/draft-04/schema": + fallthrough + case "#": + fallthrough + case "": + content = appendPair(content, "id", nodeForString(*schema.ID)) + default: + content = appendPair(content, "$id", nodeForString(*schema.ID)) + } } if schema.Schema != nil { content = appendPair(content, "$schema", nodeForString(*schema.Schema)) } + if schema.ReadOnly != nil && *schema.ReadOnly { + content = appendPair(content, "readOnly", nodeForBoolean(*schema.ReadOnly)) + } + if schema.WriteOnly != nil && *schema.WriteOnly { + content = appendPair(content, "writeOnly", nodeForBoolean(*schema.WriteOnly)) + } if schema.Type != nil { content = appendPair(content, "type", schema.Type.nodeValue()) } diff --git a/vendor/github.com/google/gnostic/openapiv2/OpenAPIv2.go b/vendor/github.com/google/gnostic/openapiv2/OpenAPIv2.go index 0f179076..28c2777d 100644 --- a/vendor/github.com/google/gnostic/openapiv2/OpenAPIv2.go +++ b/vendor/github.com/google/gnostic/openapiv2/OpenAPIv2.go @@ -7887,7 +7887,12 @@ func (m *Oauth2Scopes) ToRawInfo() *yaml.Node { if m == nil { return info } - // &{Name:additionalProperties Type:NamedString StringEnumValues:[] MapType:string Repeated:true Pattern: Implicit:true Description:} + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info.Content = 
append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Value)) + } + } return info } diff --git a/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.go b/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.go index 5f4a7025..d54a84db 100644 --- a/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.go +++ b/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.go @@ -8560,7 +8560,12 @@ func (m *Strings) ToRawInfo() *yaml.Node { if m == nil { return info } - // &{Name:additionalProperties Type:NamedString StringEnumValues:[] MapType:string Repeated:true Pattern: Implicit:true Description:} + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Value)) + } + } return info } diff --git a/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.pb.go b/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.pb.go index 499e7f93..90a56f55 100644 --- a/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.pb.go +++ b/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.pb.go @@ -16,8 +16,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.26.0 -// protoc v3.18.1 +// protoc-gen-go v1.28.0 +// protoc v3.19.4 // source: openapiv3/OpenAPIv3.proto package openapi_v3 @@ -6760,12 +6760,13 @@ var file_openapiv3_OpenAPIv3_proto_rawDesc = []byte{ 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x16, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x3e, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x56, 0x0a, 0x0e, 0x6f, 0x72, 0x67, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x33, 0x42, 0x0c, 0x4f, 0x70, 0x65, 0x6e, 0x41, 0x50, 0x49, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, - 0x5a, 0x16, 0x2e, 0x2f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x33, 0x3b, 0x6f, 0x70, - 0x65, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x33, 0xa2, 0x02, 0x03, 0x4f, 0x41, 0x53, 0x62, 0x06, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x5a, 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x2f, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x76, 0x33, 0x3b, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x33, + 0xa2, 0x02, 0x03, 0x4f, 0x41, 0x53, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.proto b/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.proto index 1be335b8..7aede5ed 100644 --- a/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.proto +++ b/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.proto @@ -42,7 +42,7 @@ option java_package = "org.openapi_v3"; option objc_class_prefix = "OAS"; // The Go package name. 
-option go_package = "./openapiv3;openapi_v3"; +option go_package = "github.com/google/gnostic/openapiv3;openapi_v3"; message AdditionalPropertiesItem { oneof oneof { diff --git a/vendor/github.com/google/gnostic/openapiv3/README.md b/vendor/github.com/google/gnostic/openapiv3/README.md index 5ee12d92..83603b82 100644 --- a/vendor/github.com/google/gnostic/openapiv3/README.md +++ b/vendor/github.com/google/gnostic/openapiv3/README.md @@ -19,3 +19,7 @@ for OpenAPI. The schema-generator directory contains support code which generates openapi-3.1.json from the OpenAPI 3.1 specification document (Markdown). + +### How to rebuild + +`protoc -I=. -I=third_party --go_out=. --go_opt=paths=source_relative openapiv3/*.proto` \ No newline at end of file diff --git a/vendor/github.com/google/gnostic/openapiv3/annotations.pb.go b/vendor/github.com/google/gnostic/openapiv3/annotations.pb.go new file mode 100644 index 00000000..ae242f30 --- /dev/null +++ b/vendor/github.com/google/gnostic/openapiv3/annotations.pb.go @@ -0,0 +1,183 @@ +// Copyright 2022 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.0 +// protoc v3.19.4 +// source: openapiv3/annotations.proto + +package openapi_v3 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" + reflect "reflect" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +var file_openapiv3_annotations_proto_extTypes = []protoimpl.ExtensionInfo{ + { + ExtendedType: (*descriptorpb.FileOptions)(nil), + ExtensionType: (*Document)(nil), + Field: 1143, + Name: "openapi.v3.document", + Tag: "bytes,1143,opt,name=document", + Filename: "openapiv3/annotations.proto", + }, + { + ExtendedType: (*descriptorpb.MethodOptions)(nil), + ExtensionType: (*Operation)(nil), + Field: 1143, + Name: "openapi.v3.operation", + Tag: "bytes,1143,opt,name=operation", + Filename: "openapiv3/annotations.proto", + }, + { + ExtendedType: (*descriptorpb.MessageOptions)(nil), + ExtensionType: (*Schema)(nil), + Field: 1143, + Name: "openapi.v3.schema", + Tag: "bytes,1143,opt,name=schema", + Filename: "openapiv3/annotations.proto", + }, + { + ExtendedType: (*descriptorpb.FieldOptions)(nil), + ExtensionType: (*Schema)(nil), + Field: 1143, + Name: "openapi.v3.property", + Tag: "bytes,1143,opt,name=property", + Filename: "openapiv3/annotations.proto", + }, +} + +// Extension fields to descriptorpb.FileOptions. +var ( + // optional openapi.v3.Document document = 1143; + E_Document = &file_openapiv3_annotations_proto_extTypes[0] +) + +// Extension fields to descriptorpb.MethodOptions. 
+var ( + // optional openapi.v3.Operation operation = 1143; + E_Operation = &file_openapiv3_annotations_proto_extTypes[1] +) + +// Extension fields to descriptorpb.MessageOptions. +var ( + // optional openapi.v3.Schema schema = 1143; + E_Schema = &file_openapiv3_annotations_proto_extTypes[2] +) + +// Extension fields to descriptorpb.FieldOptions. +var ( + // optional openapi.v3.Schema property = 1143; + E_Property = &file_openapiv3_annotations_proto_extTypes[3] +) + +var File_openapiv3_annotations_proto protoreflect.FileDescriptor + +var file_openapiv3_annotations_proto_rawDesc = []byte{ + 0x0a, 0x1b, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x1a, 0x19, 0x6f, 0x70, 0x65, 0x6e, 0x61, + 0x70, 0x69, 0x76, 0x33, 0x2f, 0x4f, 0x70, 0x65, 0x6e, 0x41, 0x50, 0x49, 0x76, 0x33, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x3a, 0x4f, 0x0a, 0x08, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, + 0x6e, 0x74, 0x12, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x18, 0xf7, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x08, 0x64, + 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x3a, 0x54, 0x0a, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xf7, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x3a, 0x4c, 0x0a, + 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xf7, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x12, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x3a, 0x4e, 0x0a, 0x08, 0x70, + 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xf7, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, + 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x42, 0x5a, 0x0a, 0x0e, 0x6f, + 0x72, 0x67, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x33, 0x42, 0x10, 0x41, + 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, + 0x01, 0x5a, 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 
0x65, 0x2f, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x2f, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x76, 0x33, 0x3b, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, + 0x33, 0xa2, 0x02, 0x03, 0x4f, 0x41, 0x53, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var file_openapiv3_annotations_proto_goTypes = []interface{}{ + (*descriptorpb.FileOptions)(nil), // 0: google.protobuf.FileOptions + (*descriptorpb.MethodOptions)(nil), // 1: google.protobuf.MethodOptions + (*descriptorpb.MessageOptions)(nil), // 2: google.protobuf.MessageOptions + (*descriptorpb.FieldOptions)(nil), // 3: google.protobuf.FieldOptions + (*Document)(nil), // 4: openapi.v3.Document + (*Operation)(nil), // 5: openapi.v3.Operation + (*Schema)(nil), // 6: openapi.v3.Schema +} +var file_openapiv3_annotations_proto_depIdxs = []int32{ + 0, // 0: openapi.v3.document:extendee -> google.protobuf.FileOptions + 1, // 1: openapi.v3.operation:extendee -> google.protobuf.MethodOptions + 2, // 2: openapi.v3.schema:extendee -> google.protobuf.MessageOptions + 3, // 3: openapi.v3.property:extendee -> google.protobuf.FieldOptions + 4, // 4: openapi.v3.document:type_name -> openapi.v3.Document + 5, // 5: openapi.v3.operation:type_name -> openapi.v3.Operation + 6, // 6: openapi.v3.schema:type_name -> openapi.v3.Schema + 6, // 7: openapi.v3.property:type_name -> openapi.v3.Schema + 8, // [8:8] is the sub-list for method output_type + 8, // [8:8] is the sub-list for method input_type + 4, // [4:8] is the sub-list for extension type_name + 0, // [0:4] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_openapiv3_annotations_proto_init() } +func file_openapiv3_annotations_proto_init() { + if File_openapiv3_annotations_proto != nil { + return + } + file_openapiv3_OpenAPIv3_proto_init() + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_openapiv3_annotations_proto_rawDesc, + NumEnums: 0, + NumMessages: 0, + NumExtensions: 4, + NumServices: 0, + }, + GoTypes: file_openapiv3_annotations_proto_goTypes, + DependencyIndexes: file_openapiv3_annotations_proto_depIdxs, + ExtensionInfos: file_openapiv3_annotations_proto_extTypes, + }.Build() + File_openapiv3_annotations_proto = out.File + file_openapiv3_annotations_proto_rawDesc = nil + file_openapiv3_annotations_proto_goTypes = nil + file_openapiv3_annotations_proto_depIdxs = nil +} diff --git a/vendor/github.com/google/gnostic/openapiv3/annotations.proto b/vendor/github.com/google/gnostic/openapiv3/annotations.proto new file mode 100644 index 00000000..0bd87810 --- /dev/null +++ b/vendor/github.com/google/gnostic/openapiv3/annotations.proto @@ -0,0 +1,60 @@ +// Copyright 2022 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
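The generated extension descriptors above (E_Document, E_Operation, E_Schema, E_Property) hang OpenAPI metadata off the standard descriptor options at field number 1143. A small sketch of setting and reading one of them back with the protobuf v2 API, assuming the vendored gnostic import path:

```go
package main

import (
	"fmt"

	openapi_v3 "github.com/google/gnostic/openapiv3"
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
)

// operationFor returns the openapi.v3.operation annotation attached to a
// method's options, or nil when the extension is absent.
func operationFor(opts *descriptorpb.MethodOptions) *openapi_v3.Operation {
	if opts == nil || !proto.HasExtension(opts, openapi_v3.E_Operation) {
		return nil
	}
	op, _ := proto.GetExtension(opts, openapi_v3.E_Operation).(*openapi_v3.Operation)
	return op
}

func main() {
	// Attach and read back an annotation on a synthetic options message.
	opts := &descriptorpb.MethodOptions{}
	proto.SetExtension(opts, openapi_v3.E_Operation, &openapi_v3.Operation{OperationId: "getBook"})
	fmt.Println(operationFor(opts).GetOperationId())
}
```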
+ +syntax = "proto3"; + +package openapi.v3; + +import "openapiv3/OpenAPIv3.proto"; +import "google/protobuf/descriptor.proto"; + +// This option lets the proto compiler generate Java code inside the package +// name (see below) instead of inside an outer class. It creates a simpler +// developer experience by reducing one-level of name nesting and be +// consistent with most programming languages that don't support outer classes. +option java_multiple_files = true; + +// The Java outer classname should be the filename in UpperCamelCase. This +// class is only used to hold proto descriptor, so developers don't need to +// work with it directly. +option java_outer_classname = "AnnotationsProto"; + +// The Java package name must be proto package name with proper prefix. +option java_package = "org.openapi_v3"; + +// A reasonable prefix for the Objective-C symbols generated from the package. +// It should at a minimum be 3 characters long, all uppercase, and convention +// is to use an abbreviation of the package name. Something short, but +// hopefully unique enough to not conflict with things that may come along in +// the future. 'GPB' is reserved for the protocol buffer implementation itself. +option objc_class_prefix = "OAS"; + +// The Go package name. +option go_package = "github.com/google/gnostic/openapiv3;openapi_v3"; + +extend google.protobuf.FileOptions { + Document document = 1143; +} + +extend google.protobuf.MethodOptions { + Operation operation = 1143; +} + +extend google.protobuf.MessageOptions { + Schema schema = 1143; +} + +extend google.protobuf.FieldOptions { + Schema property = 1143; +} \ No newline at end of file diff --git a/vendor/github.com/google/gofuzz/.travis.yml b/vendor/github.com/google/gofuzz/.travis.yml index f8684d99..061d72ae 100644 --- a/vendor/github.com/google/gofuzz/.travis.yml +++ b/vendor/github.com/google/gofuzz/.travis.yml @@ -1,13 +1,10 @@ language: go go: - - 1.4 - - 1.3 - - 1.2 - - tip - -install: - - if ! go get code.google.com/p/go.tools/cmd/cover; then go get golang.org/x/tools/cmd/cover; fi + - 1.11.x + - 1.12.x + - 1.13.x + - master script: - go test -cover diff --git a/vendor/github.com/google/gofuzz/CONTRIBUTING.md b/vendor/github.com/google/gofuzz/CONTRIBUTING.md index 51cf5cd1..97c1b34f 100644 --- a/vendor/github.com/google/gofuzz/CONTRIBUTING.md +++ b/vendor/github.com/google/gofuzz/CONTRIBUTING.md @@ -1,7 +1,7 @@ # How to contribute # We'd love to accept your patches and contributions to this project. There are -a just a few small guidelines you need to follow. +just a few small guidelines you need to follow. ## Contributor License Agreement ## diff --git a/vendor/github.com/google/gofuzz/README.md b/vendor/github.com/google/gofuzz/README.md index 386c2a45..b503aae7 100644 --- a/vendor/github.com/google/gofuzz/README.md +++ b/vendor/github.com/google/gofuzz/README.md @@ -68,4 +68,22 @@ f.Fuzz(&myObject) // Type will correspond to whether A or B info is set. See more examples in ```example_test.go```. +You can use this library for easier [go-fuzz](https://github.com/dvyukov/go-fuzz)ing. +go-fuzz provides the user a byte-slice, which should be converted to different inputs +for the tested function. This library can help convert the byte slice. 
Consider for +example a fuzz test for the function `mypackage.MyFunc` that takes an int argument: +```go +// +build gofuzz +package mypackage + +import fuzz "github.com/google/gofuzz" + +func Fuzz(data []byte) int { + var i int + fuzz.NewFromGoFuzz(data).Fuzz(&i) + MyFunc(i) + return 0 +} +``` + + Happy testing! diff --git a/vendor/github.com/google/gofuzz/bytesource/bytesource.go b/vendor/github.com/google/gofuzz/bytesource/bytesource.go new file mode 100644 index 00000000..5bb36594 --- /dev/null +++ b/vendor/github.com/google/gofuzz/bytesource/bytesource.go @@ -0,0 +1,81 @@ +/* +Copyright 2014 Google Inc. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package bytesource provides a rand.Source64 that is determined by a slice of bytes. +package bytesource + +import ( + "bytes" + "encoding/binary" + "io" + "math/rand" +) + +// ByteSource implements rand.Source64 determined by a slice of bytes. The random numbers are +// generated from each 8 bytes in the slice, until the last bytes are consumed, from which a +// fallback pseudo random source is created in case more random numbers are required. +// It also exposes a `bytes.Reader` API, which lets callers consume the bytes directly. +type ByteSource struct { + *bytes.Reader + fallback rand.Source +} + +// New returns a new ByteSource from a given slice of bytes. +func New(input []byte) *ByteSource { + s := &ByteSource{ + Reader: bytes.NewReader(input), + fallback: rand.NewSource(0), + } + if len(input) > 0 { + s.fallback = rand.NewSource(int64(s.consumeUint64())) + } + return s +} + +func (s *ByteSource) Uint64() uint64 { + // Return from input if it was not exhausted. + if s.Len() > 0 { + return s.consumeUint64() + } + + // Input was exhausted, return random number from fallback (in this case fallback should not be + // nil). Try first having a Uint64 output (Should work in current rand implementation), + // otherwise return a conversion of Int63. + if s64, ok := s.fallback.(rand.Source64); ok { + return s64.Uint64() + } + return uint64(s.fallback.Int63()) +} + +func (s *ByteSource) Int63() int64 { + return int64(s.Uint64() >> 1) +} + +func (s *ByteSource) Seed(seed int64) { + s.fallback = rand.NewSource(seed) + s.Reader = bytes.NewReader(nil) +} + +// consumeUint64 reads 8 bytes from the input and converts them to a uint64. It assumes that the +// bytes reader is not empty. +func (s *ByteSource) consumeUint64() uint64 { + var bytes [8]byte + _, err := s.Read(bytes[:]) + if err != nil && err != io.EOF { + panic("failed reading source") // Should not happen. + } + return binary.BigEndian.Uint64(bytes[:]) +} diff --git a/vendor/github.com/google/gofuzz/fuzz.go b/vendor/github.com/google/gofuzz/fuzz.go index da0a5f93..761520a8 100644 --- a/vendor/github.com/google/gofuzz/fuzz.go +++ b/vendor/github.com/google/gofuzz/fuzz.go @@ -22,6 +22,9 @@ import ( "reflect" "regexp" "time" + + "github.com/google/gofuzz/bytesource" + "strings" ) // fuzzFuncMap is a map from a type to a fuzzFunc that handles that type.
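A short usage sketch of the bytesource package added above: the constructor consumes the first eight bytes to seed the fallback, subsequent draws come from the remaining input, and once the slice is exhausted the seeded fallback takes over, so equal inputs always yield equal streams:

```go
package main

import (
	"fmt"
	"math/rand"

	"github.com/google/gofuzz/bytesource"
)

func main() {
	input := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
	a := rand.New(bytesource.New(input))
	b := rand.New(bytesource.New(input))
	// Deterministic: both sources consume the same bytes, then the same
	// seeded fallback, so the streams match indefinitely.
	fmt.Println(a.Uint64() == b.Uint64(), a.Uint64() == b.Uint64())
}
```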
@@ -61,6 +64,34 @@ func NewWithSeed(seed int64) *Fuzzer { return f } +// NewFromGoFuzz is a helper function that enables using gofuzz (this +// project) with go-fuzz (https://github.com/dvyukov/go-fuzz) for continuous +// fuzzing. Essentially, it enables translating the fuzzing bytes from +// go-fuzz to any Go object using this library. +// +// This implementation promises a constant translation from a given slice of +// bytes to the fuzzed objects. This promise will remain over future +// versions of Go and of this library. +// +// Note: the returned Fuzzer should not be shared between multiple goroutines, +// as its deterministic output will no longer be available. +// +// Example: use go-fuzz to test the function `MyFunc(int)` in the package +// `mypackage`. Add the file: "mypacakge_fuzz.go" with the content: +// +// // +build gofuzz +// package mypacakge +// import fuzz "github.com/google/gofuzz" +// func Fuzz(data []byte) int { +// var i int +// fuzz.NewFromGoFuzz(data).Fuzz(&i) +// MyFunc(i) +// return 0 +// } +func NewFromGoFuzz(data []byte) *Fuzzer { + return New().RandSource(bytesource.New(data)) +} + // Funcs adds each entry in fuzzFuncs as a custom fuzzing function. // // Each entry in fuzzFuncs must be a function taking two parameters. @@ -141,7 +172,7 @@ func (f *Fuzzer) genElementCount() int { } func (f *Fuzzer) genShouldFill() bool { - return f.r.Float64() > f.nilChance + return f.r.Float64() >= f.nilChance } // MaxDepth sets the maximum number of recursive fuzz calls that will be made @@ -240,6 +271,7 @@ func (fc *fuzzerContext) doFuzz(v reflect.Value, flags uint64) { fn(v, fc.fuzzer.r) return } + switch v.Kind() { case reflect.Map: if fc.fuzzer.genShouldFill() { @@ -450,10 +482,10 @@ var fillFuncMap = map[reflect.Kind]func(reflect.Value, *rand.Rand){ v.SetFloat(r.Float64()) }, reflect.Complex64: func(v reflect.Value, r *rand.Rand) { - panic("unimplemented") + v.SetComplex(complex128(complex(r.Float32(), r.Float32()))) }, reflect.Complex128: func(v reflect.Value, r *rand.Rand) { - panic("unimplemented") + v.SetComplex(complex(r.Float64(), r.Float64())) }, reflect.String: func(v reflect.Value, r *rand.Rand) { v.SetString(randString(r)) @@ -465,38 +497,105 @@ var fillFuncMap = map[reflect.Kind]func(reflect.Value, *rand.Rand){ // randBool returns true or false randomly. func randBool(r *rand.Rand) bool { - if r.Int()&1 == 1 { - return true - } - return false + return r.Int31()&(1<<30) == 0 +} + +type int63nPicker interface { + Int63n(int64) int64 } -type charRange struct { - first, last rune +// UnicodeRange describes a sequential range of unicode characters. +// Last must be numerically greater than First. +type UnicodeRange struct { + First, Last rune } +// UnicodeRanges describes an arbitrary number of sequential ranges of unicode characters. +// To be useful, each range must have at least one character (First <= Last) and +// there must be at least one range. +type UnicodeRanges []UnicodeRange + // choose returns a random unicode character from the given range, using the // given randomness source. -func (r *charRange) choose(rand *rand.Rand) rune { - count := int64(r.last - r.first) - return r.first + rune(rand.Int63n(count)) +func (ur UnicodeRange) choose(r int63nPicker) rune { + count := int64(ur.Last - ur.First + 1) + return ur.First + rune(r.Int63n(count)) +} + +// CustomStringFuzzFunc constructs a FuzzFunc which produces random strings. +// Each character is selected from the range ur. If there are no characters +// in the range (cr.Last < cr.First), this will panic. 
+func (ur UnicodeRange) CustomStringFuzzFunc() func(s *string, c Continue) { + ur.check() + return func(s *string, c Continue) { + *s = ur.randString(c.Rand) + } } -var unicodeRanges = []charRange{ +// check is a function that used to check whether the first of ur(UnicodeRange) +// is greater than the last one. +func (ur UnicodeRange) check() { + if ur.Last < ur.First { + panic("The last encoding must be greater than the first one.") + } +} + +// randString of UnicodeRange makes a random string up to 20 characters long. +// Each character is selected form ur(UnicodeRange). +func (ur UnicodeRange) randString(r *rand.Rand) string { + n := r.Intn(20) + sb := strings.Builder{} + sb.Grow(n) + for i := 0; i < n; i++ { + sb.WriteRune(ur.choose(r)) + } + return sb.String() +} + +// defaultUnicodeRanges sets a default unicode range when user do not set +// CustomStringFuzzFunc() but wants fuzz string. +var defaultUnicodeRanges = UnicodeRanges{ {' ', '~'}, // ASCII characters {'\u00a0', '\u02af'}, // Multi-byte encoded characters {'\u4e00', '\u9fff'}, // Common CJK (even longer encodings) } +// CustomStringFuzzFunc constructs a FuzzFunc which produces random strings. +// Each character is selected from one of the ranges of ur(UnicodeRanges). +// Each range has an equal probability of being chosen. If there are no ranges, +// or a selected range has no characters (.Last < .First), this will panic. +// Do not modify any of the ranges in ur after calling this function. +func (ur UnicodeRanges) CustomStringFuzzFunc() func(s *string, c Continue) { + // Check unicode ranges slice is empty. + if len(ur) == 0 { + panic("UnicodeRanges is empty.") + } + // if not empty, each range should be checked. + for i := range ur { + ur[i].check() + } + return func(s *string, c Continue) { + *s = ur.randString(c.Rand) + } +} + +// randString of UnicodeRanges makes a random string up to 20 characters long. +// Each character is selected form one of the ranges of ur(UnicodeRanges), +// and each range has an equal probability of being chosen. +func (ur UnicodeRanges) randString(r *rand.Rand) string { + n := r.Intn(20) + sb := strings.Builder{} + sb.Grow(n) + for i := 0; i < n; i++ { + sb.WriteRune(ur[r.Intn(len(ur))].choose(r)) + } + return sb.String() +} + // randString makes a random string up to 20 characters long. The returned string // may include a variety of (valid) UTF-8 encodings. func randString(r *rand.Rand) string { - n := r.Intn(20) - runes := make([]rune, n) - for i := range runes { - runes[i] = unicodeRanges[r.Intn(len(unicodeRanges))].choose(r) - } - return string(runes) + return defaultUnicodeRanges.randString(r) } // randUint64 makes random 64 bit numbers. diff --git a/vendor/github.com/gorilla/websocket/.gitignore b/vendor/github.com/gorilla/websocket/.gitignore new file mode 100644 index 00000000..cd3fcd1e --- /dev/null +++ b/vendor/github.com/gorilla/websocket/.gitignore @@ -0,0 +1,25 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe + +.idea/ +*.iml diff --git a/vendor/github.com/gorilla/websocket/AUTHORS b/vendor/github.com/gorilla/websocket/AUTHORS new file mode 100644 index 00000000..1931f400 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/AUTHORS @@ -0,0 +1,9 @@ +# This is the official list of Gorilla WebSocket authors for copyright +# purposes. 
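The UnicodeRange and UnicodeRanges helpers above let callers constrain generated strings, and CustomStringFuzzFunc plugs into Fuzzer.Funcs like any other custom fuzz function. A small sketch restricting strings to lowercase ASCII letters and digits:

```go
package main

import (
	"fmt"

	fuzz "github.com/google/gofuzz"
)

func main() {
	ranges := fuzz.UnicodeRanges{
		{First: 'a', Last: 'z'},
		{First: '0', Last: '9'},
	}
	// Panics up front if the slice is empty or any range has Last < First.
	f := fuzz.New().Funcs(ranges.CustomStringFuzzFunc())

	var s string
	f.Fuzz(&s) // up to 20 characters, drawn only from the ranges above
	fmt.Printf("%q\n", s)
}
```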
+# +# Please keep the list sorted. + +Gary Burd +Google LLC (https://opensource.google.com/) +Joachim Bauch + diff --git a/vendor/github.com/gorilla/websocket/LICENSE b/vendor/github.com/gorilla/websocket/LICENSE new file mode 100644 index 00000000..9171c972 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/LICENSE @@ -0,0 +1,22 @@ +Copyright (c) 2013 The Gorilla WebSocket Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/gorilla/websocket/README.md b/vendor/github.com/gorilla/websocket/README.md new file mode 100644 index 00000000..2517a287 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/README.md @@ -0,0 +1,39 @@ +# Gorilla WebSocket + +[![GoDoc](https://godoc.org/github.com/gorilla/websocket?status.svg)](https://godoc.org/github.com/gorilla/websocket) +[![CircleCI](https://circleci.com/gh/gorilla/websocket.svg?style=svg)](https://circleci.com/gh/gorilla/websocket) + +Gorilla WebSocket is a [Go](http://golang.org/) implementation of the +[WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol. + + +--- + +⚠️ **[The Gorilla WebSocket Package is looking for a new maintainer](https://github.com/gorilla/websocket/issues/370)** + +--- + +### Documentation + +* [API Reference](https://pkg.go.dev/github.com/gorilla/websocket?tab=doc) +* [Chat example](https://github.com/gorilla/websocket/tree/master/examples/chat) +* [Command example](https://github.com/gorilla/websocket/tree/master/examples/command) +* [Client and server example](https://github.com/gorilla/websocket/tree/master/examples/echo) +* [File watch example](https://github.com/gorilla/websocket/tree/master/examples/filewatch) + +### Status + +The Gorilla WebSocket package provides a complete and tested implementation of +the [WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol. The +package API is stable. + +### Installation + + go get github.com/gorilla/websocket + +### Protocol Compliance + +The Gorilla WebSocket package passes the server tests in the [Autobahn Test +Suite](https://github.com/crossbario/autobahn-testsuite) using the application in the [examples/autobahn +subdirectory](https://github.com/gorilla/websocket/tree/master/examples/autobahn). 
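To complement the README above, a minimal client round-trip using the package's public API; ws://example.com/echo is a placeholder endpoint:

```go
package main

import (
	"log"

	"github.com/gorilla/websocket"
)

func main() {
	conn, _, err := websocket.DefaultDialer.Dial("ws://example.com/echo", nil)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()

	if err := conn.WriteMessage(websocket.TextMessage, []byte("hello")); err != nil {
		log.Fatalf("write: %v", err)
	}
	_, msg, err := conn.ReadMessage()
	if err != nil {
		log.Fatalf("read: %v", err)
	}
	log.Printf("recv: %s", msg)
}
```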
+ diff --git a/vendor/github.com/gorilla/websocket/client.go b/vendor/github.com/gorilla/websocket/client.go new file mode 100644 index 00000000..2efd8355 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/client.go @@ -0,0 +1,422 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bytes" + "context" + "crypto/tls" + "errors" + "io" + "io/ioutil" + "net" + "net/http" + "net/http/httptrace" + "net/url" + "strings" + "time" +) + +// ErrBadHandshake is returned when the server response to opening handshake is +// invalid. +var ErrBadHandshake = errors.New("websocket: bad handshake") + +var errInvalidCompression = errors.New("websocket: invalid compression negotiation") + +// NewClient creates a new client connection using the given net connection. +// The URL u specifies the host and request URI. Use requestHeader to specify +// the origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies +// (Cookie). Use the response.Header to get the selected subprotocol +// (Sec-WebSocket-Protocol) and cookies (Set-Cookie). +// +// If the WebSocket handshake fails, ErrBadHandshake is returned along with a +// non-nil *http.Response so that callers can handle redirects, authentication, +// etc. +// +// Deprecated: Use Dialer instead. +func NewClient(netConn net.Conn, u *url.URL, requestHeader http.Header, readBufSize, writeBufSize int) (c *Conn, response *http.Response, err error) { + d := Dialer{ + ReadBufferSize: readBufSize, + WriteBufferSize: writeBufSize, + NetDial: func(net, addr string) (net.Conn, error) { + return netConn, nil + }, + } + return d.Dial(u.String(), requestHeader) +} + +// A Dialer contains options for connecting to WebSocket server. +// +// It is safe to call Dialer's methods concurrently. +type Dialer struct { + // NetDial specifies the dial function for creating TCP connections. If + // NetDial is nil, net.Dial is used. + NetDial func(network, addr string) (net.Conn, error) + + // NetDialContext specifies the dial function for creating TCP connections. If + // NetDialContext is nil, NetDial is used. + NetDialContext func(ctx context.Context, network, addr string) (net.Conn, error) + + // NetDialTLSContext specifies the dial function for creating TLS/TCP connections. If + // NetDialTLSContext is nil, NetDialContext is used. + // If NetDialTLSContext is set, Dial assumes the TLS handshake is done there and + // TLSClientConfig is ignored. + NetDialTLSContext func(ctx context.Context, network, addr string) (net.Conn, error) + + // Proxy specifies a function to return a proxy for a given + // Request. If the function returns a non-nil error, the + // request is aborted with the provided error. + // If Proxy is nil or returns a nil *URL, no proxy is used. + Proxy func(*http.Request) (*url.URL, error) + + // TLSClientConfig specifies the TLS configuration to use with tls.Client. + // If nil, the default configuration is used. + // If either NetDialTLS or NetDialTLSContext are set, Dial assumes the TLS handshake + // is done there and TLSClientConfig is ignored. + TLSClientConfig *tls.Config + + // HandshakeTimeout specifies the duration for the handshake to complete. + HandshakeTimeout time.Duration + + // ReadBufferSize and WriteBufferSize specify I/O buffer sizes in bytes. If a buffer + // size is zero, then a useful default size is used. 
The I/O buffer sizes + // do not limit the size of the messages that can be sent or received. + ReadBufferSize, WriteBufferSize int + + // WriteBufferPool is a pool of buffers for write operations. If the value + // is not set, then write buffers are allocated to the connection for the + // lifetime of the connection. + // + // A pool is most useful when the application has a modest volume of writes + // across a large number of connections. + // + // Applications should use a single pool for each unique value of + // WriteBufferSize. + WriteBufferPool BufferPool + + // Subprotocols specifies the client's requested subprotocols. + Subprotocols []string + + // EnableCompression specifies if the client should attempt to negotiate + // per message compression (RFC 7692). Setting this value to true does not + // guarantee that compression will be supported. Currently only "no context + // takeover" modes are supported. + EnableCompression bool + + // Jar specifies the cookie jar. + // If Jar is nil, cookies are not sent in requests and ignored + // in responses. + Jar http.CookieJar +} + +// Dial creates a new client connection by calling DialContext with a background context. +func (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) { + return d.DialContext(context.Background(), urlStr, requestHeader) +} + +var errMalformedURL = errors.New("malformed ws or wss URL") + +func hostPortNoPort(u *url.URL) (hostPort, hostNoPort string) { + hostPort = u.Host + hostNoPort = u.Host + if i := strings.LastIndex(u.Host, ":"); i > strings.LastIndex(u.Host, "]") { + hostNoPort = hostNoPort[:i] + } else { + switch u.Scheme { + case "wss": + hostPort += ":443" + case "https": + hostPort += ":443" + default: + hostPort += ":80" + } + } + return hostPort, hostNoPort +} + +// DefaultDialer is a dialer with all fields set to the default values. +var DefaultDialer = &Dialer{ + Proxy: http.ProxyFromEnvironment, + HandshakeTimeout: 45 * time.Second, +} + +// nilDialer is dialer to use when receiver is nil. +var nilDialer = *DefaultDialer + +// DialContext creates a new client connection. Use requestHeader to specify the +// origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies (Cookie). +// Use the response.Header to get the selected subprotocol +// (Sec-WebSocket-Protocol) and cookies (Set-Cookie). +// +// The context will be used in the request and in the Dialer. +// +// If the WebSocket handshake fails, ErrBadHandshake is returned along with a +// non-nil *http.Response so that callers can handle redirects, authentication, +// etcetera. The response body may not contain the entire response and does not +// need to be closed by the application. +func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) { + if d == nil { + d = &nilDialer + } + + challengeKey, err := generateChallengeKey() + if err != nil { + return nil, nil, err + } + + u, err := url.Parse(urlStr) + if err != nil { + return nil, nil, err + } + + switch u.Scheme { + case "ws": + u.Scheme = "http" + case "wss": + u.Scheme = "https" + default: + return nil, nil, errMalformedURL + } + + if u.User != nil { + // User name and password are not allowed in websocket URIs. 
+ return nil, nil, errMalformedURL + } + + req := &http.Request{ + Method: http.MethodGet, + URL: u, + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + Header: make(http.Header), + Host: u.Host, + } + req = req.WithContext(ctx) + + // Set the cookies present in the cookie jar of the dialer + if d.Jar != nil { + for _, cookie := range d.Jar.Cookies(u) { + req.AddCookie(cookie) + } + } + + // Set the request headers using the capitalization for names and values in + // RFC examples. Although the capitalization shouldn't matter, there are + // servers that depend on it. The Header.Set method is not used because the + // method canonicalizes the header names. + req.Header["Upgrade"] = []string{"websocket"} + req.Header["Connection"] = []string{"Upgrade"} + req.Header["Sec-WebSocket-Key"] = []string{challengeKey} + req.Header["Sec-WebSocket-Version"] = []string{"13"} + if len(d.Subprotocols) > 0 { + req.Header["Sec-WebSocket-Protocol"] = []string{strings.Join(d.Subprotocols, ", ")} + } + for k, vs := range requestHeader { + switch { + case k == "Host": + if len(vs) > 0 { + req.Host = vs[0] + } + case k == "Upgrade" || + k == "Connection" || + k == "Sec-Websocket-Key" || + k == "Sec-Websocket-Version" || + k == "Sec-Websocket-Extensions" || + (k == "Sec-Websocket-Protocol" && len(d.Subprotocols) > 0): + return nil, nil, errors.New("websocket: duplicate header not allowed: " + k) + case k == "Sec-Websocket-Protocol": + req.Header["Sec-WebSocket-Protocol"] = vs + default: + req.Header[k] = vs + } + } + + if d.EnableCompression { + req.Header["Sec-WebSocket-Extensions"] = []string{"permessage-deflate; server_no_context_takeover; client_no_context_takeover"} + } + + if d.HandshakeTimeout != 0 { + var cancel func() + ctx, cancel = context.WithTimeout(ctx, d.HandshakeTimeout) + defer cancel() + } + + // Get network dial function. + var netDial func(network, add string) (net.Conn, error) + + switch u.Scheme { + case "http": + if d.NetDialContext != nil { + netDial = func(network, addr string) (net.Conn, error) { + return d.NetDialContext(ctx, network, addr) + } + } else if d.NetDial != nil { + netDial = d.NetDial + } + case "https": + if d.NetDialTLSContext != nil { + netDial = func(network, addr string) (net.Conn, error) { + return d.NetDialTLSContext(ctx, network, addr) + } + } else if d.NetDialContext != nil { + netDial = func(network, addr string) (net.Conn, error) { + return d.NetDialContext(ctx, network, addr) + } + } else if d.NetDial != nil { + netDial = d.NetDial + } + default: + return nil, nil, errMalformedURL + } + + if netDial == nil { + netDialer := &net.Dialer{} + netDial = func(network, addr string) (net.Conn, error) { + return netDialer.DialContext(ctx, network, addr) + } + } + + // If needed, wrap the dial function to set the connection deadline. + if deadline, ok := ctx.Deadline(); ok { + forwardDial := netDial + netDial = func(network, addr string) (net.Conn, error) { + c, err := forwardDial(network, addr) + if err != nil { + return nil, err + } + err = c.SetDeadline(deadline) + if err != nil { + c.Close() + return nil, err + } + return c, nil + } + } + + // If needed, wrap the dial function to connect through a proxy. 
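Before the proxy wrapping step below, it may help to see these knobs from the caller's side. A hedged sketch of a configured Dialer; every value here is illustrative, not a recommendation:

    package main

    import (
        "log"
        "net"
        "time"

        "github.com/gorilla/websocket"
    )

    // dialer overrides the TCP dial step, bounds the handshake, and offers
    // permessage-deflate; the server is free to decline the compression offer.
    var dialer = websocket.Dialer{
        NetDialContext:    (&net.Dialer{Timeout: 5 * time.Second}).DialContext,
        HandshakeTimeout:  10 * time.Second,
        EnableCompression: true,
    }

    func main() {
        conn, _, err := dialer.Dial("wss://example.test/ws", nil) // hypothetical URL
        if err != nil {
            log.Fatal(err)
        }
        defer conn.Close()
    }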
+ if d.Proxy != nil { + proxyURL, err := d.Proxy(req) + if err != nil { + return nil, nil, err + } + if proxyURL != nil { + dialer, err := proxy_FromURL(proxyURL, netDialerFunc(netDial)) + if err != nil { + return nil, nil, err + } + netDial = dialer.Dial + } + } + + hostPort, hostNoPort := hostPortNoPort(u) + trace := httptrace.ContextClientTrace(ctx) + if trace != nil && trace.GetConn != nil { + trace.GetConn(hostPort) + } + + netConn, err := netDial("tcp", hostPort) + if trace != nil && trace.GotConn != nil { + trace.GotConn(httptrace.GotConnInfo{ + Conn: netConn, + }) + } + if err != nil { + return nil, nil, err + } + + defer func() { + if netConn != nil { + netConn.Close() + } + }() + + if u.Scheme == "https" && d.NetDialTLSContext == nil { + // If NetDialTLSContext is set, assume that the TLS handshake has already been done + + cfg := cloneTLSConfig(d.TLSClientConfig) + if cfg.ServerName == "" { + cfg.ServerName = hostNoPort + } + tlsConn := tls.Client(netConn, cfg) + netConn = tlsConn + + if trace != nil && trace.TLSHandshakeStart != nil { + trace.TLSHandshakeStart() + } + err := doHandshake(ctx, tlsConn, cfg) + if trace != nil && trace.TLSHandshakeDone != nil { + trace.TLSHandshakeDone(tlsConn.ConnectionState(), err) + } + + if err != nil { + return nil, nil, err + } + } + + conn := newConn(netConn, false, d.ReadBufferSize, d.WriteBufferSize, d.WriteBufferPool, nil, nil) + + if err := req.Write(netConn); err != nil { + return nil, nil, err + } + + if trace != nil && trace.GotFirstResponseByte != nil { + if peek, err := conn.br.Peek(1); err == nil && len(peek) == 1 { + trace.GotFirstResponseByte() + } + } + + resp, err := http.ReadResponse(conn.br, req) + if err != nil { + return nil, nil, err + } + + if d.Jar != nil { + if rc := resp.Cookies(); len(rc) > 0 { + d.Jar.SetCookies(u, rc) + } + } + + if resp.StatusCode != 101 || + !tokenListContainsValue(resp.Header, "Upgrade", "websocket") || + !tokenListContainsValue(resp.Header, "Connection", "upgrade") || + resp.Header.Get("Sec-Websocket-Accept") != computeAcceptKey(challengeKey) { + // Before closing the network connection on return from this + // function, slurp up some of the response to aid application + // debugging. + buf := make([]byte, 1024) + n, _ := io.ReadFull(resp.Body, buf) + resp.Body = ioutil.NopCloser(bytes.NewReader(buf[:n])) + return nil, resp, ErrBadHandshake + } + + for _, ext := range parseExtensions(resp.Header) { + if ext[""] != "permessage-deflate" { + continue + } + _, snct := ext["server_no_context_takeover"] + _, cnct := ext["client_no_context_takeover"] + if !snct || !cnct { + return nil, resp, errInvalidCompression + } + conn.newCompressionWriter = compressNoContextTakeover + conn.newDecompressionReader = decompressNoContextTakeover + break + } + + resp.Body = ioutil.NopCloser(bytes.NewReader([]byte{})) + conn.subprotocol = resp.Header.Get("Sec-Websocket-Protocol") + + netConn.SetDeadline(time.Time{}) + netConn = nil // to avoid close in defer. + return conn, resp, nil +} + +func cloneTLSConfig(cfg *tls.Config) *tls.Config { + if cfg == nil { + return &tls.Config{} + } + return cfg.Clone() +} diff --git a/vendor/github.com/gorilla/websocket/compression.go b/vendor/github.com/gorilla/websocket/compression.go new file mode 100644 index 00000000..813ffb1e --- /dev/null +++ b/vendor/github.com/gorilla/websocket/compression.go @@ -0,0 +1,148 @@ +// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "compress/flate" + "errors" + "io" + "strings" + "sync" +) + +const ( + minCompressionLevel = -2 // flate.HuffmanOnly not defined in Go < 1.6 + maxCompressionLevel = flate.BestCompression + defaultCompressionLevel = 1 +) + +var ( + flateWriterPools [maxCompressionLevel - minCompressionLevel + 1]sync.Pool + flateReaderPool = sync.Pool{New: func() interface{} { + return flate.NewReader(nil) + }} +) + +func decompressNoContextTakeover(r io.Reader) io.ReadCloser { + const tail = + // Add four bytes as specified in RFC + "\x00\x00\xff\xff" + + // Add final block to squelch unexpected EOF error from flate reader. + "\x01\x00\x00\xff\xff" + + fr, _ := flateReaderPool.Get().(io.ReadCloser) + fr.(flate.Resetter).Reset(io.MultiReader(r, strings.NewReader(tail)), nil) + return &flateReadWrapper{fr} +} + +func isValidCompressionLevel(level int) bool { + return minCompressionLevel <= level && level <= maxCompressionLevel +} + +func compressNoContextTakeover(w io.WriteCloser, level int) io.WriteCloser { + p := &flateWriterPools[level-minCompressionLevel] + tw := &truncWriter{w: w} + fw, _ := p.Get().(*flate.Writer) + if fw == nil { + fw, _ = flate.NewWriter(tw, level) + } else { + fw.Reset(tw) + } + return &flateWriteWrapper{fw: fw, tw: tw, p: p} +} + +// truncWriter is an io.Writer that writes all but the last four bytes of the +// stream to another io.Writer. +type truncWriter struct { + w io.WriteCloser + n int + p [4]byte +} + +func (w *truncWriter) Write(p []byte) (int, error) { + n := 0 + + // fill buffer first for simplicity. + if w.n < len(w.p) { + n = copy(w.p[w.n:], p) + p = p[n:] + w.n += n + if len(p) == 0 { + return n, nil + } + } + + m := len(p) + if m > len(w.p) { + m = len(w.p) + } + + if nn, err := w.w.Write(w.p[:m]); err != nil { + return n + nn, err + } + + copy(w.p[:], w.p[m:]) + copy(w.p[len(w.p)-m:], p[len(p)-m:]) + nn, err := w.w.Write(p[:len(p)-m]) + return n + nn, err +} + +type flateWriteWrapper struct { + fw *flate.Writer + tw *truncWriter + p *sync.Pool +} + +func (w *flateWriteWrapper) Write(p []byte) (int, error) { + if w.fw == nil { + return 0, errWriteClosed + } + return w.fw.Write(p) +} + +func (w *flateWriteWrapper) Close() error { + if w.fw == nil { + return errWriteClosed + } + err1 := w.fw.Flush() + w.p.Put(w.fw) + w.fw = nil + if w.tw.p != [4]byte{0, 0, 0xff, 0xff} { + return errors.New("websocket: internal error, unexpected bytes at end of flate stream") + } + err2 := w.tw.w.Close() + if err1 != nil { + return err1 + } + return err2 +} + +type flateReadWrapper struct { + fr io.ReadCloser +} + +func (r *flateReadWrapper) Read(p []byte) (int, error) { + if r.fr == nil { + return 0, io.ErrClosedPipe + } + n, err := r.fr.Read(p) + if err == io.EOF { + // Preemptively place the reader back in the pool. This helps with + // scenarios where the application does not call NextReader() soon after + // this final read. + r.Close() + } + return n, err +} + +func (r *flateReadWrapper) Close() error { + if r.fr == nil { + return io.ErrClosedPipe + } + err := r.fr.Close() + flateReaderPool.Put(r.fr) + r.fr = nil + return err +} diff --git a/vendor/github.com/gorilla/websocket/conn.go b/vendor/github.com/gorilla/websocket/conn.go new file mode 100644 index 00000000..331eebc8 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/conn.go @@ -0,0 +1,1230 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bufio" + "encoding/binary" + "errors" + "io" + "io/ioutil" + "math/rand" + "net" + "strconv" + "strings" + "sync" + "time" + "unicode/utf8" +) + +const ( + // Frame header byte 0 bits from Section 5.2 of RFC 6455 + finalBit = 1 << 7 + rsv1Bit = 1 << 6 + rsv2Bit = 1 << 5 + rsv3Bit = 1 << 4 + + // Frame header byte 1 bits from Section 5.2 of RFC 6455 + maskBit = 1 << 7 + + maxFrameHeaderSize = 2 + 8 + 4 // Fixed header + length + mask + maxControlFramePayloadSize = 125 + + writeWait = time.Second + + defaultReadBufferSize = 4096 + defaultWriteBufferSize = 4096 + + continuationFrame = 0 + noFrame = -1 +) + +// Close codes defined in RFC 6455, section 11.7. +const ( + CloseNormalClosure = 1000 + CloseGoingAway = 1001 + CloseProtocolError = 1002 + CloseUnsupportedData = 1003 + CloseNoStatusReceived = 1005 + CloseAbnormalClosure = 1006 + CloseInvalidFramePayloadData = 1007 + ClosePolicyViolation = 1008 + CloseMessageTooBig = 1009 + CloseMandatoryExtension = 1010 + CloseInternalServerErr = 1011 + CloseServiceRestart = 1012 + CloseTryAgainLater = 1013 + CloseTLSHandshake = 1015 +) + +// The message types are defined in RFC 6455, section 11.8. +const ( + // TextMessage denotes a text data message. The text message payload is + // interpreted as UTF-8 encoded text data. + TextMessage = 1 + + // BinaryMessage denotes a binary data message. + BinaryMessage = 2 + + // CloseMessage denotes a close control message. The optional message + // payload contains a numeric code and text. Use the FormatCloseMessage + // function to format a close message payload. + CloseMessage = 8 + + // PingMessage denotes a ping control message. The optional message payload + // is UTF-8 encoded text. + PingMessage = 9 + + // PongMessage denotes a pong control message. The optional message payload + // is UTF-8 encoded text. + PongMessage = 10 +) + +// ErrCloseSent is returned when the application writes a message to the +// connection after sending a close message. +var ErrCloseSent = errors.New("websocket: close sent") + +// ErrReadLimit is returned when reading a message that is larger than the +// read limit set for the connection. +var ErrReadLimit = errors.New("websocket: read limit exceeded") + +// netError satisfies the net Error interface. +type netError struct { + msg string + temporary bool + timeout bool +} + +func (e *netError) Error() string { return e.msg } +func (e *netError) Temporary() bool { return e.temporary } +func (e *netError) Timeout() bool { return e.timeout } + +// CloseError represents a close message. +type CloseError struct { + // Code is defined in RFC 6455, section 11.7. + Code int + + // Text is the optional text payload. + Text string +} + +func (e *CloseError) Error() string { + s := []byte("websocket: close ") + s = strconv.AppendInt(s, int64(e.Code), 10) + switch e.Code { + case CloseNormalClosure: + s = append(s, " (normal)"...) + case CloseGoingAway: + s = append(s, " (going away)"...) + case CloseProtocolError: + s = append(s, " (protocol error)"...) + case CloseUnsupportedData: + s = append(s, " (unsupported data)"...) + case CloseNoStatusReceived: + s = append(s, " (no status)"...) + case CloseAbnormalClosure: + s = append(s, " (abnormal closure)"...) + case CloseInvalidFramePayloadData: + s = append(s, " (invalid payload data)"...) + case ClosePolicyViolation: + s = append(s, " (policy violation)"...) 
+ case CloseMessageTooBig: + s = append(s, " (message too big)"...) + case CloseMandatoryExtension: + s = append(s, " (mandatory extension missing)"...) + case CloseInternalServerErr: + s = append(s, " (internal server error)"...) + case CloseTLSHandshake: + s = append(s, " (TLS handshake error)"...) + } + if e.Text != "" { + s = append(s, ": "...) + s = append(s, e.Text...) + } + return string(s) +} + +// IsCloseError returns boolean indicating whether the error is a *CloseError +// with one of the specified codes. +func IsCloseError(err error, codes ...int) bool { + if e, ok := err.(*CloseError); ok { + for _, code := range codes { + if e.Code == code { + return true + } + } + } + return false +} + +// IsUnexpectedCloseError returns boolean indicating whether the error is a +// *CloseError with a code not in the list of expected codes. +func IsUnexpectedCloseError(err error, expectedCodes ...int) bool { + if e, ok := err.(*CloseError); ok { + for _, code := range expectedCodes { + if e.Code == code { + return false + } + } + return true + } + return false +} + +var ( + errWriteTimeout = &netError{msg: "websocket: write timeout", timeout: true, temporary: true} + errUnexpectedEOF = &CloseError{Code: CloseAbnormalClosure, Text: io.ErrUnexpectedEOF.Error()} + errBadWriteOpCode = errors.New("websocket: bad write message type") + errWriteClosed = errors.New("websocket: write closed") + errInvalidControlFrame = errors.New("websocket: invalid control frame") +) + +func newMaskKey() [4]byte { + n := rand.Uint32() + return [4]byte{byte(n), byte(n >> 8), byte(n >> 16), byte(n >> 24)} +} + +func hideTempErr(err error) error { + if e, ok := err.(net.Error); ok && e.Temporary() { + err = &netError{msg: e.Error(), timeout: e.Timeout()} + } + return err +} + +func isControl(frameType int) bool { + return frameType == CloseMessage || frameType == PingMessage || frameType == PongMessage +} + +func isData(frameType int) bool { + return frameType == TextMessage || frameType == BinaryMessage +} + +var validReceivedCloseCodes = map[int]bool{ + // see http://www.iana.org/assignments/websocket/websocket.xhtml#close-code-number + + CloseNormalClosure: true, + CloseGoingAway: true, + CloseProtocolError: true, + CloseUnsupportedData: true, + CloseNoStatusReceived: false, + CloseAbnormalClosure: false, + CloseInvalidFramePayloadData: true, + ClosePolicyViolation: true, + CloseMessageTooBig: true, + CloseMandatoryExtension: true, + CloseInternalServerErr: true, + CloseServiceRestart: true, + CloseTryAgainLater: true, + CloseTLSHandshake: false, +} + +func isValidReceivedCloseCode(code int) bool { + return validReceivedCloseCodes[code] || (code >= 3000 && code <= 4999) +} + +// BufferPool represents a pool of buffers. The *sync.Pool type satisfies this +// interface. The type of the value stored in a pool is not specified. +type BufferPool interface { + // Get gets a value from the pool or returns nil if the pool is empty. + Get() interface{} + // Put adds a value to the pool. + Put(interface{}) +} + +// writePoolData is the type added to the write buffer pool. This wrapper is +// used to prevent applications from peeking at and depending on the values +// added to the pool. +type writePoolData struct{ buf []byte } + +// The Conn type represents a WebSocket connection. +type Conn struct { + conn net.Conn + isServer bool + subprotocol string + + // Write fields + mu chan struct{} // used as mutex to protect write to conn + writeBuf []byte // frame is constructed in this buffer. 
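IsUnexpectedCloseError is typically used to separate a clean shutdown from a failure in the read loop. A sketch, assuming conn is an established *websocket.Conn and handle is an application callback:

    for {
        _, msg, err := conn.ReadMessage()
        if err != nil {
            if websocket.IsUnexpectedCloseError(err, websocket.CloseNormalClosure, websocket.CloseGoingAway) {
                log.Printf("unexpected close: %v", err)
            }
            break // read errors on a Conn are permanent
        }
        handle(msg)
    }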
+ writePool BufferPool + writeBufSize int + writeDeadline time.Time + writer io.WriteCloser // the current writer returned to the application + isWriting bool // for best-effort concurrent write detection + + writeErrMu sync.Mutex + writeErr error + + enableWriteCompression bool + compressionLevel int + newCompressionWriter func(io.WriteCloser, int) io.WriteCloser + + // Read fields + reader io.ReadCloser // the current reader returned to the application + readErr error + br *bufio.Reader + // bytes remaining in current frame. + // set setReadRemaining to safely update this value and prevent overflow + readRemaining int64 + readFinal bool // true the current message has more frames. + readLength int64 // Message size. + readLimit int64 // Maximum message size. + readMaskPos int + readMaskKey [4]byte + handlePong func(string) error + handlePing func(string) error + handleClose func(int, string) error + readErrCount int + messageReader *messageReader // the current low-level reader + + readDecompress bool // whether last read frame had RSV1 set + newDecompressionReader func(io.Reader) io.ReadCloser +} + +func newConn(conn net.Conn, isServer bool, readBufferSize, writeBufferSize int, writeBufferPool BufferPool, br *bufio.Reader, writeBuf []byte) *Conn { + + if br == nil { + if readBufferSize == 0 { + readBufferSize = defaultReadBufferSize + } else if readBufferSize < maxControlFramePayloadSize { + // must be large enough for control frame + readBufferSize = maxControlFramePayloadSize + } + br = bufio.NewReaderSize(conn, readBufferSize) + } + + if writeBufferSize <= 0 { + writeBufferSize = defaultWriteBufferSize + } + writeBufferSize += maxFrameHeaderSize + + if writeBuf == nil && writeBufferPool == nil { + writeBuf = make([]byte, writeBufferSize) + } + + mu := make(chan struct{}, 1) + mu <- struct{}{} + c := &Conn{ + isServer: isServer, + br: br, + conn: conn, + mu: mu, + readFinal: true, + writeBuf: writeBuf, + writePool: writeBufferPool, + writeBufSize: writeBufferSize, + enableWriteCompression: true, + compressionLevel: defaultCompressionLevel, + } + c.SetCloseHandler(nil) + c.SetPingHandler(nil) + c.SetPongHandler(nil) + return c +} + +// setReadRemaining tracks the number of bytes remaining on the connection. If n +// overflows, an ErrReadLimit is returned. +func (c *Conn) setReadRemaining(n int64) error { + if n < 0 { + return ErrReadLimit + } + + c.readRemaining = n + return nil +} + +// Subprotocol returns the negotiated protocol for the connection. +func (c *Conn) Subprotocol() string { + return c.subprotocol +} + +// Close closes the underlying network connection without sending or waiting +// for a close message. +func (c *Conn) Close() error { + return c.conn.Close() +} + +// LocalAddr returns the local network address. +func (c *Conn) LocalAddr() net.Addr { + return c.conn.LocalAddr() +} + +// RemoteAddr returns the remote network address. 
+func (c *Conn) RemoteAddr() net.Addr { + return c.conn.RemoteAddr() +} + +// Write methods + +func (c *Conn) writeFatal(err error) error { + err = hideTempErr(err) + c.writeErrMu.Lock() + if c.writeErr == nil { + c.writeErr = err + } + c.writeErrMu.Unlock() + return err +} + +func (c *Conn) read(n int) ([]byte, error) { + p, err := c.br.Peek(n) + if err == io.EOF { + err = errUnexpectedEOF + } + c.br.Discard(len(p)) + return p, err +} + +func (c *Conn) write(frameType int, deadline time.Time, buf0, buf1 []byte) error { + <-c.mu + defer func() { c.mu <- struct{}{} }() + + c.writeErrMu.Lock() + err := c.writeErr + c.writeErrMu.Unlock() + if err != nil { + return err + } + + c.conn.SetWriteDeadline(deadline) + if len(buf1) == 0 { + _, err = c.conn.Write(buf0) + } else { + err = c.writeBufs(buf0, buf1) + } + if err != nil { + return c.writeFatal(err) + } + if frameType == CloseMessage { + c.writeFatal(ErrCloseSent) + } + return nil +} + +func (c *Conn) writeBufs(bufs ...[]byte) error { + b := net.Buffers(bufs) + _, err := b.WriteTo(c.conn) + return err +} + +// WriteControl writes a control message with the given deadline. The allowed +// message types are CloseMessage, PingMessage and PongMessage. +func (c *Conn) WriteControl(messageType int, data []byte, deadline time.Time) error { + if !isControl(messageType) { + return errBadWriteOpCode + } + if len(data) > maxControlFramePayloadSize { + return errInvalidControlFrame + } + + b0 := byte(messageType) | finalBit + b1 := byte(len(data)) + if !c.isServer { + b1 |= maskBit + } + + buf := make([]byte, 0, maxFrameHeaderSize+maxControlFramePayloadSize) + buf = append(buf, b0, b1) + + if c.isServer { + buf = append(buf, data...) + } else { + key := newMaskKey() + buf = append(buf, key[:]...) + buf = append(buf, data...) + maskBytes(key, 0, buf[6:]) + } + + d := 1000 * time.Hour + if !deadline.IsZero() { + d = deadline.Sub(time.Now()) + if d < 0 { + return errWriteTimeout + } + } + + timer := time.NewTimer(d) + select { + case <-c.mu: + timer.Stop() + case <-timer.C: + return errWriteTimeout + } + defer func() { c.mu <- struct{}{} }() + + c.writeErrMu.Lock() + err := c.writeErr + c.writeErrMu.Unlock() + if err != nil { + return err + } + + c.conn.SetWriteDeadline(deadline) + _, err = c.conn.Write(buf) + if err != nil { + return c.writeFatal(err) + } + if messageType == CloseMessage { + c.writeFatal(ErrCloseSent) + } + return err +} + +// beginMessage prepares a connection and message writer for a new message. +func (c *Conn) beginMessage(mw *messageWriter, messageType int) error { + // Close previous writer if not already closed by the application. It's + // probably better to return an error in this situation, but we cannot + // change this without breaking existing applications. + if c.writer != nil { + c.writer.Close() + c.writer = nil + } + + if !isControl(messageType) && !isData(messageType) { + return errBadWriteOpCode + } + + c.writeErrMu.Lock() + err := c.writeErr + c.writeErrMu.Unlock() + if err != nil { + return err + } + + mw.c = c + mw.frameType = messageType + mw.pos = maxFrameHeaderSize + + if c.writeBuf == nil { + wpd, ok := c.writePool.Get().(writePoolData) + if ok { + c.writeBuf = wpd.buf + } else { + c.writeBuf = make([]byte, c.writeBufSize) + } + } + return nil +} + +// NextWriter returns a writer for the next message to send. The writer's Close +// method flushes the complete message to the network. +// +// There can be at most one open writer on a connection. 
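WriteControl may be called concurrently with the other write methods, which makes it the natural vehicle for periodic pings. A sketch with arbitrary intervals, again assuming an established conn:

    ticker := time.NewTicker(30 * time.Second)
    defer ticker.Stop()
    for range ticker.C {
        deadline := time.Now().Add(time.Second)
        if err := conn.WriteControl(websocket.PingMessage, nil, deadline); err != nil {
            return // the next read will surface the close error
        }
    }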
NextWriter closes the +// previous writer if the application has not already done so. +// +// All message types (TextMessage, BinaryMessage, CloseMessage, PingMessage and +// PongMessage) are supported. +func (c *Conn) NextWriter(messageType int) (io.WriteCloser, error) { + var mw messageWriter + if err := c.beginMessage(&mw, messageType); err != nil { + return nil, err + } + c.writer = &mw + if c.newCompressionWriter != nil && c.enableWriteCompression && isData(messageType) { + w := c.newCompressionWriter(c.writer, c.compressionLevel) + mw.compress = true + c.writer = w + } + return c.writer, nil +} + +type messageWriter struct { + c *Conn + compress bool // whether next call to flushFrame should set RSV1 + pos int // end of data in writeBuf. + frameType int // type of the current frame. + err error +} + +func (w *messageWriter) endMessage(err error) error { + if w.err != nil { + return err + } + c := w.c + w.err = err + c.writer = nil + if c.writePool != nil { + c.writePool.Put(writePoolData{buf: c.writeBuf}) + c.writeBuf = nil + } + return err +} + +// flushFrame writes buffered data and extra as a frame to the network. The +// final argument indicates that this is the last frame in the message. +func (w *messageWriter) flushFrame(final bool, extra []byte) error { + c := w.c + length := w.pos - maxFrameHeaderSize + len(extra) + + // Check for invalid control frames. + if isControl(w.frameType) && + (!final || length > maxControlFramePayloadSize) { + return w.endMessage(errInvalidControlFrame) + } + + b0 := byte(w.frameType) + if final { + b0 |= finalBit + } + if w.compress { + b0 |= rsv1Bit + } + w.compress = false + + b1 := byte(0) + if !c.isServer { + b1 |= maskBit + } + + // Assume that the frame starts at beginning of c.writeBuf. + framePos := 0 + if c.isServer { + // Adjust up if mask not included in the header. + framePos = 4 + } + + switch { + case length >= 65536: + c.writeBuf[framePos] = b0 + c.writeBuf[framePos+1] = b1 | 127 + binary.BigEndian.PutUint64(c.writeBuf[framePos+2:], uint64(length)) + case length > 125: + framePos += 6 + c.writeBuf[framePos] = b0 + c.writeBuf[framePos+1] = b1 | 126 + binary.BigEndian.PutUint16(c.writeBuf[framePos+2:], uint16(length)) + default: + framePos += 8 + c.writeBuf[framePos] = b0 + c.writeBuf[framePos+1] = b1 | byte(length) + } + + if !c.isServer { + key := newMaskKey() + copy(c.writeBuf[maxFrameHeaderSize-4:], key[:]) + maskBytes(key, 0, c.writeBuf[maxFrameHeaderSize:w.pos]) + if len(extra) > 0 { + return w.endMessage(c.writeFatal(errors.New("websocket: internal error, extra used in client mode"))) + } + } + + // Write the buffers to the connection with best-effort detection of + // concurrent writes. See the concurrency section in the package + // documentation for more info. + + if c.isWriting { + panic("concurrent write to websocket connection") + } + c.isWriting = true + + err := c.write(w.frameType, c.writeDeadline, c.writeBuf[framePos:w.pos], extra) + + if !c.isWriting { + panic("concurrent write to websocket connection") + } + c.isWriting = false + + if err != nil { + return w.endMessage(err) + } + + if final { + w.endMessage(errWriteClosed) + return nil + } + + // Setup for next frame. 
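Seen from the application, the writer built here fills writeBuf and emits a frame on each flush, so NextWriter suits message bodies that should not be held in memory whole. A sketch, where src is any io.Reader supplied by the caller:

    w, err := conn.NextWriter(websocket.BinaryMessage)
    if err != nil {
        return err
    }
    if _, err := io.Copy(w, src); err != nil {
        return err
    }
    return w.Close() // Close flushes the final frame of the message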
+ w.pos = maxFrameHeaderSize + w.frameType = continuationFrame + return nil +} + +func (w *messageWriter) ncopy(max int) (int, error) { + n := len(w.c.writeBuf) - w.pos + if n <= 0 { + if err := w.flushFrame(false, nil); err != nil { + return 0, err + } + n = len(w.c.writeBuf) - w.pos + } + if n > max { + n = max + } + return n, nil +} + +func (w *messageWriter) Write(p []byte) (int, error) { + if w.err != nil { + return 0, w.err + } + + if len(p) > 2*len(w.c.writeBuf) && w.c.isServer { + // Don't buffer large messages. + err := w.flushFrame(false, p) + if err != nil { + return 0, err + } + return len(p), nil + } + + nn := len(p) + for len(p) > 0 { + n, err := w.ncopy(len(p)) + if err != nil { + return 0, err + } + copy(w.c.writeBuf[w.pos:], p[:n]) + w.pos += n + p = p[n:] + } + return nn, nil +} + +func (w *messageWriter) WriteString(p string) (int, error) { + if w.err != nil { + return 0, w.err + } + + nn := len(p) + for len(p) > 0 { + n, err := w.ncopy(len(p)) + if err != nil { + return 0, err + } + copy(w.c.writeBuf[w.pos:], p[:n]) + w.pos += n + p = p[n:] + } + return nn, nil +} + +func (w *messageWriter) ReadFrom(r io.Reader) (nn int64, err error) { + if w.err != nil { + return 0, w.err + } + for { + if w.pos == len(w.c.writeBuf) { + err = w.flushFrame(false, nil) + if err != nil { + break + } + } + var n int + n, err = r.Read(w.c.writeBuf[w.pos:]) + w.pos += n + nn += int64(n) + if err != nil { + if err == io.EOF { + err = nil + } + break + } + } + return nn, err +} + +func (w *messageWriter) Close() error { + if w.err != nil { + return w.err + } + return w.flushFrame(true, nil) +} + +// WritePreparedMessage writes prepared message into connection. +func (c *Conn) WritePreparedMessage(pm *PreparedMessage) error { + frameType, frameData, err := pm.frame(prepareKey{ + isServer: c.isServer, + compress: c.newCompressionWriter != nil && c.enableWriteCompression && isData(pm.messageType), + compressionLevel: c.compressionLevel, + }) + if err != nil { + return err + } + if c.isWriting { + panic("concurrent write to websocket connection") + } + c.isWriting = true + err = c.write(frameType, c.writeDeadline, frameData, nil) + if !c.isWriting { + panic("concurrent write to websocket connection") + } + c.isWriting = false + return err +} + +// WriteMessage is a helper method for getting a writer using NextWriter, +// writing the message and closing the writer. +func (c *Conn) WriteMessage(messageType int, data []byte) error { + + if c.isServer && (c.newCompressionWriter == nil || !c.enableWriteCompression) { + // Fast path with no allocations and single frame. + + var mw messageWriter + if err := c.beginMessage(&mw, messageType); err != nil { + return err + } + n := copy(c.writeBuf[mw.pos:], data) + mw.pos += n + data = data[n:] + return mw.flushFrame(true, data) + } + + w, err := c.NextWriter(messageType) + if err != nil { + return err + } + if _, err = w.Write(data); err != nil { + return err + } + return w.Close() +} + +// SetWriteDeadline sets the write deadline on the underlying network +// connection. After a write has timed out, the websocket state is corrupt and +// all future writes will return an error. A zero value for t means writes will +// not time out. +func (c *Conn) SetWriteDeadline(t time.Time) error { + c.writeDeadline = t + return nil +} + +// Read methods + +func (c *Conn) advanceFrame() (int, error) { + // 1. Skip remainder of previous frame. 
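Before turning to the read path below, a note on the write helpers just defined: for small payloads WriteMessage is the usual entry point, and pairing it with SetWriteDeadline bounds how long a stalled peer can block the writer. The duration is illustrative:

    conn.SetWriteDeadline(time.Now().Add(5 * time.Second))
    if err := conn.WriteMessage(websocket.TextMessage, []byte(`{"op":"ping"}`)); err != nil {
        // After a write timeout the connection state is corrupt; drop it.
        conn.Close()
    }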
+ + if c.readRemaining > 0 { + if _, err := io.CopyN(ioutil.Discard, c.br, c.readRemaining); err != nil { + return noFrame, err + } + } + + // 2. Read and parse first two bytes of frame header. + // To aid debugging, collect and report all errors in the first two bytes + // of the header. + + var errors []string + + p, err := c.read(2) + if err != nil { + return noFrame, err + } + + frameType := int(p[0] & 0xf) + final := p[0]&finalBit != 0 + rsv1 := p[0]&rsv1Bit != 0 + rsv2 := p[0]&rsv2Bit != 0 + rsv3 := p[0]&rsv3Bit != 0 + mask := p[1]&maskBit != 0 + c.setReadRemaining(int64(p[1] & 0x7f)) + + c.readDecompress = false + if rsv1 { + if c.newDecompressionReader != nil { + c.readDecompress = true + } else { + errors = append(errors, "RSV1 set") + } + } + + if rsv2 { + errors = append(errors, "RSV2 set") + } + + if rsv3 { + errors = append(errors, "RSV3 set") + } + + switch frameType { + case CloseMessage, PingMessage, PongMessage: + if c.readRemaining > maxControlFramePayloadSize { + errors = append(errors, "len > 125 for control") + } + if !final { + errors = append(errors, "FIN not set on control") + } + case TextMessage, BinaryMessage: + if !c.readFinal { + errors = append(errors, "data before FIN") + } + c.readFinal = final + case continuationFrame: + if c.readFinal { + errors = append(errors, "continuation after FIN") + } + c.readFinal = final + default: + errors = append(errors, "bad opcode "+strconv.Itoa(frameType)) + } + + if mask != c.isServer { + errors = append(errors, "bad MASK") + } + + if len(errors) > 0 { + return noFrame, c.handleProtocolError(strings.Join(errors, ", ")) + } + + // 3. Read and parse frame length as per + // https://tools.ietf.org/html/rfc6455#section-5.2 + // + // The length of the "Payload data", in bytes: if 0-125, that is the payload + // length. + // - If 126, the following 2 bytes interpreted as a 16-bit unsigned + // integer are the payload length. + // - If 127, the following 8 bytes interpreted as + // a 64-bit unsigned integer (the most significant bit MUST be 0) are the + // payload length. Multibyte length quantities are expressed in network byte + // order. + + switch c.readRemaining { + case 126: + p, err := c.read(2) + if err != nil { + return noFrame, err + } + + if err := c.setReadRemaining(int64(binary.BigEndian.Uint16(p))); err != nil { + return noFrame, err + } + case 127: + p, err := c.read(8) + if err != nil { + return noFrame, err + } + + if err := c.setReadRemaining(int64(binary.BigEndian.Uint64(p))); err != nil { + return noFrame, err + } + } + + // 4. Handle frame masking. + + if mask { + c.readMaskPos = 0 + p, err := c.read(len(c.readMaskKey)) + if err != nil { + return noFrame, err + } + copy(c.readMaskKey[:], p) + } + + // 5. For text and binary messages, enforce read limit and return. + + if frameType == continuationFrame || frameType == TextMessage || frameType == BinaryMessage { + + c.readLength += c.readRemaining + // Don't allow readLength to overflow in the presence of a large readRemaining + // counter. + if c.readLength < 0 { + return noFrame, ErrReadLimit + } + + if c.readLimit > 0 && c.readLength > c.readLimit { + c.WriteControl(CloseMessage, FormatCloseMessage(CloseMessageTooBig, ""), time.Now().Add(writeWait)) + return noFrame, ErrReadLimit + } + + return frameType, nil + } + + // 6. Read control frame payload. 
+ + var payload []byte + if c.readRemaining > 0 { + payload, err = c.read(int(c.readRemaining)) + c.setReadRemaining(0) + if err != nil { + return noFrame, err + } + if c.isServer { + maskBytes(c.readMaskKey, 0, payload) + } + } + + // 7. Process control frame payload. + + switch frameType { + case PongMessage: + if err := c.handlePong(string(payload)); err != nil { + return noFrame, err + } + case PingMessage: + if err := c.handlePing(string(payload)); err != nil { + return noFrame, err + } + case CloseMessage: + closeCode := CloseNoStatusReceived + closeText := "" + if len(payload) >= 2 { + closeCode = int(binary.BigEndian.Uint16(payload)) + if !isValidReceivedCloseCode(closeCode) { + return noFrame, c.handleProtocolError("bad close code " + strconv.Itoa(closeCode)) + } + closeText = string(payload[2:]) + if !utf8.ValidString(closeText) { + return noFrame, c.handleProtocolError("invalid utf8 payload in close frame") + } + } + if err := c.handleClose(closeCode, closeText); err != nil { + return noFrame, err + } + return noFrame, &CloseError{Code: closeCode, Text: closeText} + } + + return frameType, nil +} + +func (c *Conn) handleProtocolError(message string) error { + data := FormatCloseMessage(CloseProtocolError, message) + if len(data) > maxControlFramePayloadSize { + data = data[:maxControlFramePayloadSize] + } + c.WriteControl(CloseMessage, data, time.Now().Add(writeWait)) + return errors.New("websocket: " + message) +} + +// NextReader returns the next data message received from the peer. The +// returned messageType is either TextMessage or BinaryMessage. +// +// There can be at most one open reader on a connection. NextReader discards +// the previous message if the application has not already consumed it. +// +// Applications must break out of the application's read loop when this method +// returns a non-nil error value. Errors returned from this method are +// permanent. Once this method returns a non-nil error, all subsequent calls to +// this method return the same error. +func (c *Conn) NextReader() (messageType int, r io.Reader, err error) { + // Close previous reader, only relevant for decompression. + if c.reader != nil { + c.reader.Close() + c.reader = nil + } + + c.messageReader = nil + c.readLength = 0 + + for c.readErr == nil { + frameType, err := c.advanceFrame() + if err != nil { + c.readErr = hideTempErr(err) + break + } + + if frameType == TextMessage || frameType == BinaryMessage { + c.messageReader = &messageReader{c} + c.reader = c.messageReader + if c.readDecompress { + c.reader = c.newDecompressionReader(c.reader) + } + return frameType, c.reader, nil + } + } + + // Applications that do handle the error returned from this method spin in + // tight loop on connection failure. To help application developers detect + // this error, panic on repeated reads to the failed connection. 
+ c.readErrCount++ + if c.readErrCount >= 1000 { + panic("repeated read on failed websocket connection") + } + + return noFrame, nil, c.readErr +} + +type messageReader struct{ c *Conn } + +func (r *messageReader) Read(b []byte) (int, error) { + c := r.c + if c.messageReader != r { + return 0, io.EOF + } + + for c.readErr == nil { + + if c.readRemaining > 0 { + if int64(len(b)) > c.readRemaining { + b = b[:c.readRemaining] + } + n, err := c.br.Read(b) + c.readErr = hideTempErr(err) + if c.isServer { + c.readMaskPos = maskBytes(c.readMaskKey, c.readMaskPos, b[:n]) + } + rem := c.readRemaining + rem -= int64(n) + c.setReadRemaining(rem) + if c.readRemaining > 0 && c.readErr == io.EOF { + c.readErr = errUnexpectedEOF + } + return n, c.readErr + } + + if c.readFinal { + c.messageReader = nil + return 0, io.EOF + } + + frameType, err := c.advanceFrame() + switch { + case err != nil: + c.readErr = hideTempErr(err) + case frameType == TextMessage || frameType == BinaryMessage: + c.readErr = errors.New("websocket: internal error, unexpected text or binary in Reader") + } + } + + err := c.readErr + if err == io.EOF && c.messageReader == r { + err = errUnexpectedEOF + } + return 0, err +} + +func (r *messageReader) Close() error { + return nil +} + +// ReadMessage is a helper method for getting a reader using NextReader and +// reading from that reader to a buffer. +func (c *Conn) ReadMessage() (messageType int, p []byte, err error) { + var r io.Reader + messageType, r, err = c.NextReader() + if err != nil { + return messageType, nil, err + } + p, err = ioutil.ReadAll(r) + return messageType, p, err +} + +// SetReadDeadline sets the read deadline on the underlying network connection. +// After a read has timed out, the websocket connection state is corrupt and +// all future reads will return an error. A zero value for t means reads will +// not time out. +func (c *Conn) SetReadDeadline(t time.Time) error { + return c.conn.SetReadDeadline(t) +} + +// SetReadLimit sets the maximum size in bytes for a message read from the peer. If a +// message exceeds the limit, the connection sends a close message to the peer +// and returns ErrReadLimit to the application. +func (c *Conn) SetReadLimit(limit int64) { + c.readLimit = limit +} + +// CloseHandler returns the current close handler +func (c *Conn) CloseHandler() func(code int, text string) error { + return c.handleClose +} + +// SetCloseHandler sets the handler for close messages received from the peer. +// The code argument to h is the received close code or CloseNoStatusReceived +// if the close message is empty. The default close handler sends a close +// message back to the peer. +// +// The handler function is called from the NextReader, ReadMessage and message +// reader Read methods. The application must read the connection to process +// close messages as described in the section on Control Messages above. +// +// The connection read methods return a CloseError when a close message is +// received. Most applications should handle close messages as part of their +// normal error handling. Applications should only set a close handler when the +// application must perform some action before sending a close message back to +// the peer. 
+func (c *Conn) SetCloseHandler(h func(code int, text string) error) { + if h == nil { + h = func(code int, text string) error { + message := FormatCloseMessage(code, "") + c.WriteControl(CloseMessage, message, time.Now().Add(writeWait)) + return nil + } + } + c.handleClose = h +} + +// PingHandler returns the current ping handler +func (c *Conn) PingHandler() func(appData string) error { + return c.handlePing +} + +// SetPingHandler sets the handler for ping messages received from the peer. +// The appData argument to h is the PING message application data. The default +// ping handler sends a pong to the peer. +// +// The handler function is called from the NextReader, ReadMessage and message +// reader Read methods. The application must read the connection to process +// ping messages as described in the section on Control Messages above. +func (c *Conn) SetPingHandler(h func(appData string) error) { + if h == nil { + h = func(message string) error { + err := c.WriteControl(PongMessage, []byte(message), time.Now().Add(writeWait)) + if err == ErrCloseSent { + return nil + } else if e, ok := err.(net.Error); ok && e.Temporary() { + return nil + } + return err + } + } + c.handlePing = h +} + +// PongHandler returns the current pong handler +func (c *Conn) PongHandler() func(appData string) error { + return c.handlePong +} + +// SetPongHandler sets the handler for pong messages received from the peer. +// The appData argument to h is the PONG message application data. The default +// pong handler does nothing. +// +// The handler function is called from the NextReader, ReadMessage and message +// reader Read methods. The application must read the connection to process +// pong messages as described in the section on Control Messages above. +func (c *Conn) SetPongHandler(h func(appData string) error) { + if h == nil { + h = func(string) error { return nil } + } + c.handlePong = h +} + +// UnderlyingConn returns the internal net.Conn. This can be used to further +// modifications to connection specific flags. +func (c *Conn) UnderlyingConn() net.Conn { + return c.conn +} + +// EnableWriteCompression enables and disables write compression of +// subsequent text and binary messages. This function is a noop if +// compression was not negotiated with the peer. +func (c *Conn) EnableWriteCompression(enable bool) { + c.enableWriteCompression = enable +} + +// SetCompressionLevel sets the flate compression level for subsequent text and +// binary messages. This function is a noop if compression was not negotiated +// with the peer. See the compress/flate package for a description of +// compression levels. +func (c *Conn) SetCompressionLevel(level int) error { + if !isValidCompressionLevel(level) { + return errors.New("websocket: invalid compression level") + } + c.compressionLevel = level + return nil +} + +// FormatCloseMessage formats closeCode and text as a WebSocket close message. +// An empty message is returned for code CloseNoStatusReceived. +func FormatCloseMessage(closeCode int, text string) []byte { + if closeCode == CloseNoStatusReceived { + // Return empty message because it's illegal to send + // CloseNoStatusReceived. Return non-nil value in case application + // checks for nil. 
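A common pattern built on these handlers is a read-deadline keepalive: each pong pushes the read deadline out, while pings are sent elsewhere on a shorter interval (see the WriteControl sketch earlier). The duration below is illustrative:

    const pongWait = 60 * time.Second

    conn.SetReadDeadline(time.Now().Add(pongWait))
    conn.SetPongHandler(func(string) error {
        // Every pong proves the peer is alive, so extend the deadline.
        return conn.SetReadDeadline(time.Now().Add(pongWait))
    })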
+ return []byte{} + } + buf := make([]byte, 2+len(text)) + binary.BigEndian.PutUint16(buf, uint16(closeCode)) + copy(buf[2:], text) + return buf +} diff --git a/vendor/github.com/gorilla/websocket/doc.go b/vendor/github.com/gorilla/websocket/doc.go new file mode 100644 index 00000000..8db0cef9 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/doc.go @@ -0,0 +1,227 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package websocket implements the WebSocket protocol defined in RFC 6455. +// +// Overview +// +// The Conn type represents a WebSocket connection. A server application calls +// the Upgrader.Upgrade method from an HTTP request handler to get a *Conn: +// +// var upgrader = websocket.Upgrader{ +// ReadBufferSize: 1024, +// WriteBufferSize: 1024, +// } +// +// func handler(w http.ResponseWriter, r *http.Request) { +// conn, err := upgrader.Upgrade(w, r, nil) +// if err != nil { +// log.Println(err) +// return +// } +// ... Use conn to send and receive messages. +// } +// +// Call the connection's WriteMessage and ReadMessage methods to send and +// receive messages as a slice of bytes. This snippet of code shows how to echo +// messages using these methods: +// +// for { +// messageType, p, err := conn.ReadMessage() +// if err != nil { +// log.Println(err) +// return +// } +// if err := conn.WriteMessage(messageType, p); err != nil { +// log.Println(err) +// return +// } +// } +// +// In above snippet of code, p is a []byte and messageType is an int with value +// websocket.BinaryMessage or websocket.TextMessage. +// +// An application can also send and receive messages using the io.WriteCloser +// and io.Reader interfaces. To send a message, call the connection NextWriter +// method to get an io.WriteCloser, write the message to the writer and close +// the writer when done. To receive a message, call the connection NextReader +// method to get an io.Reader and read until io.EOF is returned. This snippet +// shows how to echo messages using the NextWriter and NextReader methods: +// +// for { +// messageType, r, err := conn.NextReader() +// if err != nil { +// return +// } +// w, err := conn.NextWriter(messageType) +// if err != nil { +// return err +// } +// if _, err := io.Copy(w, r); err != nil { +// return err +// } +// if err := w.Close(); err != nil { +// return err +// } +// } +// +// Data Messages +// +// The WebSocket protocol distinguishes between text and binary data messages. +// Text messages are interpreted as UTF-8 encoded text. The interpretation of +// binary messages is left to the application. +// +// This package uses the TextMessage and BinaryMessage integer constants to +// identify the two data message types. The ReadMessage and NextReader methods +// return the type of the received message. The messageType argument to the +// WriteMessage and NextWriter methods specifies the type of a sent message. +// +// It is the application's responsibility to ensure that text messages are +// valid UTF-8 encoded text. +// +// Control Messages +// +// The WebSocket protocol defines three types of control messages: close, ping +// and pong. Call the connection WriteControl, WriteMessage or NextWriter +// methods to send a control message to the peer. 
+// +// Connections handle received close messages by calling the handler function +// set with the SetCloseHandler method and by returning a *CloseError from the +// NextReader, ReadMessage or the message Read method. The default close +// handler sends a close message to the peer. +// +// Connections handle received ping messages by calling the handler function +// set with the SetPingHandler method. The default ping handler sends a pong +// message to the peer. +// +// Connections handle received pong messages by calling the handler function +// set with the SetPongHandler method. The default pong handler does nothing. +// If an application sends ping messages, then the application should set a +// pong handler to receive the corresponding pong. +// +// The control message handler functions are called from the NextReader, +// ReadMessage and message reader Read methods. The default close and ping +// handlers can block these methods for a short time when the handler writes to +// the connection. +// +// The application must read the connection to process close, ping and pong +// messages sent from the peer. If the application is not otherwise interested +// in messages from the peer, then the application should start a goroutine to +// read and discard messages from the peer. A simple example is: +// +// func readLoop(c *websocket.Conn) { +// for { +// if _, _, err := c.NextReader(); err != nil { +// c.Close() +// break +// } +// } +// } +// +// Concurrency +// +// Connections support one concurrent reader and one concurrent writer. +// +// Applications are responsible for ensuring that no more than one goroutine +// calls the write methods (NextWriter, SetWriteDeadline, WriteMessage, +// WriteJSON, EnableWriteCompression, SetCompressionLevel) concurrently and +// that no more than one goroutine calls the read methods (NextReader, +// SetReadDeadline, ReadMessage, ReadJSON, SetPongHandler, SetPingHandler) +// concurrently. +// +// The Close and WriteControl methods can be called concurrently with all other +// methods. +// +// Origin Considerations +// +// Web browsers allow Javascript applications to open a WebSocket connection to +// any host. It's up to the server to enforce an origin policy using the Origin +// request header sent by the browser. +// +// The Upgrader calls the function specified in the CheckOrigin field to check +// the origin. If the CheckOrigin function returns false, then the Upgrade +// method fails the WebSocket handshake with HTTP status 403. +// +// If the CheckOrigin field is nil, then the Upgrader uses a safe default: fail +// the handshake if the Origin request header is present and the Origin host is +// not equal to the Host request header. +// +// The deprecated package-level Upgrade function does not perform origin +// checking. The application is responsible for checking the Origin header +// before calling the Upgrade function. +// +// Buffers +// +// Connections buffer network input and output to reduce the number +// of system calls when reading or writing messages. +// +// Write buffers are also used for constructing WebSocket frames. See RFC 6455, +// Section 5 for a discussion of message framing. A WebSocket frame header is +// written to the network each time a write buffer is flushed to the network. +// Decreasing the size of the write buffer can increase the amount of framing +// overhead on the connection. +// +// The buffer sizes in bytes are specified by the ReadBufferSize and +// WriteBufferSize fields in the Dialer and Upgrader. 
The Dialer uses a default
+// size of 4096 when a buffer size field is set to zero. The Upgrader reuses
+// buffers created by the HTTP server when a buffer size field is set to zero.
+// The HTTP server buffers have a size of 4096 at the time of this writing.
+//
+// The buffer sizes do not limit the size of a message that can be read or
+// written by a connection.
+//
+// Buffers are held for the lifetime of the connection by default. If the
+// Dialer or Upgrader WriteBufferPool field is set, then a connection holds the
+// write buffer only when writing a message.
+//
+// Applications should tune the buffer sizes to balance memory use and
+// performance. Increasing the buffer size uses more memory, but can reduce the
+// number of system calls to read or write the network. In the case of writing,
+// increasing the buffer size can reduce the number of frame headers written to
+// the network.
+//
+// Some guidelines for setting buffer parameters are:
+//
+// Limit the buffer sizes to the maximum expected message size. Buffers larger
+// than the largest message do not provide any benefit.
+//
+// Depending on the distribution of message sizes, setting the buffer size to
+// a value less than the maximum expected message size can greatly reduce memory
+// use with a small impact on performance. Here's an example: If 99% of the
+// messages are smaller than 256 bytes and the maximum message size is 512
+// bytes, then a buffer size of 256 bytes will result in 1.01 times as many
+// system calls as a buffer size of 512 bytes. The memory savings is 50%.
+//
+// A write buffer pool is useful when the application has a modest number of
+// writes over a large number of connections. When buffers are pooled, a larger
+// buffer size has a reduced impact on total memory use and has the benefit of
+// reducing system calls and frame overhead.
+//
+// Compression EXPERIMENTAL
+//
+// Per message compression extensions (RFC 7692) are experimentally supported
+// by this package in a limited capacity. Setting the EnableCompression option
+// to true in Dialer or Upgrader will attempt to negotiate per message deflate
+// support.
+//
+//	var upgrader = websocket.Upgrader{
+//		EnableCompression: true,
+//	}
+//
+// If compression was successfully negotiated with the connection's peer, any
+// message received in compressed form will be automatically decompressed.
+// All Read methods will return uncompressed bytes.
+//
+// Per message compression of messages written to a connection can be enabled
+// or disabled by calling the corresponding Conn method:
+//
+//	conn.EnableWriteCompression(false)
+//
+// Currently this package does not support compression with "context takeover".
+// This means that messages must be compressed and decompressed in isolation,
+// without retaining sliding window or dictionary state across messages. For
+// more details refer to RFC 7692.
+//
+// Use of compression is experimental and may result in decreased performance.
+package websocket
diff --git a/vendor/github.com/gorilla/websocket/join.go b/vendor/github.com/gorilla/websocket/join.go
new file mode 100644
index 00000000..c64f8c82
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/join.go
@@ -0,0 +1,42 @@
+// Copyright 2019 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+	"io"
+	"strings"
+)
+
+// JoinMessages concatenates received messages to create a single io.Reader.
+// The string term is appended to each message. The returned reader does not +// support concurrent calls to the Read method. +func JoinMessages(c *Conn, term string) io.Reader { + return &joinReader{c: c, term: term} +} + +type joinReader struct { + c *Conn + term string + r io.Reader +} + +func (r *joinReader) Read(p []byte) (int, error) { + if r.r == nil { + var err error + _, r.r, err = r.c.NextReader() + if err != nil { + return 0, err + } + if r.term != "" { + r.r = io.MultiReader(r.r, strings.NewReader(r.term)) + } + } + n, err := r.r.Read(p) + if err == io.EOF { + err = nil + r.r = nil + } + return n, err +} diff --git a/vendor/github.com/gorilla/websocket/json.go b/vendor/github.com/gorilla/websocket/json.go new file mode 100644 index 00000000..dc2c1f64 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/json.go @@ -0,0 +1,60 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "encoding/json" + "io" +) + +// WriteJSON writes the JSON encoding of v as a message. +// +// Deprecated: Use c.WriteJSON instead. +func WriteJSON(c *Conn, v interface{}) error { + return c.WriteJSON(v) +} + +// WriteJSON writes the JSON encoding of v as a message. +// +// See the documentation for encoding/json Marshal for details about the +// conversion of Go values to JSON. +func (c *Conn) WriteJSON(v interface{}) error { + w, err := c.NextWriter(TextMessage) + if err != nil { + return err + } + err1 := json.NewEncoder(w).Encode(v) + err2 := w.Close() + if err1 != nil { + return err1 + } + return err2 +} + +// ReadJSON reads the next JSON-encoded message from the connection and stores +// it in the value pointed to by v. +// +// Deprecated: Use c.ReadJSON instead. +func ReadJSON(c *Conn, v interface{}) error { + return c.ReadJSON(v) +} + +// ReadJSON reads the next JSON-encoded message from the connection and stores +// it in the value pointed to by v. +// +// See the documentation for the encoding/json Unmarshal function for details +// about the conversion of JSON to a Go value. +func (c *Conn) ReadJSON(v interface{}) error { + _, r, err := c.NextReader() + if err != nil { + return err + } + err = json.NewDecoder(r).Decode(v) + if err == io.EOF { + // One value is expected in the message. + err = io.ErrUnexpectedEOF + } + return err +} diff --git a/vendor/github.com/gorilla/websocket/mask.go b/vendor/github.com/gorilla/websocket/mask.go new file mode 100644 index 00000000..d0742bf2 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/mask.go @@ -0,0 +1,55 @@ +// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. Use of +// this source code is governed by a BSD-style license that can be found in the +// LICENSE file. + +//go:build !appengine +// +build !appengine + +package websocket + +import "unsafe" + +const wordSize = int(unsafe.Sizeof(uintptr(0))) + +func maskBytes(key [4]byte, pos int, b []byte) int { + // Mask one byte at a time for small buffers. + if len(b) < 2*wordSize { + for i := range b { + b[i] ^= key[pos&3] + pos++ + } + return pos & 3 + } + + // Mask one byte at a time to word boundary. + if n := int(uintptr(unsafe.Pointer(&b[0]))) % wordSize; n != 0 { + n = wordSize - n + for i := range b[:n] { + b[i] ^= key[pos&3] + pos++ + } + b = b[n:] + } + + // Create aligned word size key. 
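+	// Note: the 4-byte mask key is replicated across one machine word so the
+	// XOR below can be applied a word at a time instead of a byte at a time.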
+ var k [wordSize]byte + for i := range k { + k[i] = key[(pos+i)&3] + } + kw := *(*uintptr)(unsafe.Pointer(&k)) + + // Mask one word at a time. + n := (len(b) / wordSize) * wordSize + for i := 0; i < n; i += wordSize { + *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&b[0])) + uintptr(i))) ^= kw + } + + // Mask one byte at a time for remaining bytes. + b = b[n:] + for i := range b { + b[i] ^= key[pos&3] + pos++ + } + + return pos & 3 +} diff --git a/vendor/github.com/gorilla/websocket/mask_safe.go b/vendor/github.com/gorilla/websocket/mask_safe.go new file mode 100644 index 00000000..36250ca7 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/mask_safe.go @@ -0,0 +1,16 @@ +// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. Use of +// this source code is governed by a BSD-style license that can be found in the +// LICENSE file. + +//go:build appengine +// +build appengine + +package websocket + +func maskBytes(key [4]byte, pos int, b []byte) int { + for i := range b { + b[i] ^= key[pos&3] + pos++ + } + return pos & 3 +} diff --git a/vendor/github.com/gorilla/websocket/prepared.go b/vendor/github.com/gorilla/websocket/prepared.go new file mode 100644 index 00000000..c854225e --- /dev/null +++ b/vendor/github.com/gorilla/websocket/prepared.go @@ -0,0 +1,102 @@ +// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bytes" + "net" + "sync" + "time" +) + +// PreparedMessage caches on the wire representations of a message payload. +// Use PreparedMessage to efficiently send a message payload to multiple +// connections. PreparedMessage is especially useful when compression is used +// because the CPU and memory expensive compression operation can be executed +// once for a given set of compression options. +type PreparedMessage struct { + messageType int + data []byte + mu sync.Mutex + frames map[prepareKey]*preparedFrame +} + +// prepareKey defines a unique set of options to cache prepared frames in PreparedMessage. +type prepareKey struct { + isServer bool + compress bool + compressionLevel int +} + +// preparedFrame contains data in wire representation. +type preparedFrame struct { + once sync.Once + data []byte +} + +// NewPreparedMessage returns an initialized PreparedMessage. You can then send +// it to connection using WritePreparedMessage method. Valid wire +// representation will be calculated lazily only once for a set of current +// connection options. +func NewPreparedMessage(messageType int, data []byte) (*PreparedMessage, error) { + pm := &PreparedMessage{ + messageType: messageType, + frames: make(map[prepareKey]*preparedFrame), + data: data, + } + + // Prepare a plain server frame. + _, frameData, err := pm.frame(prepareKey{isServer: true, compress: false}) + if err != nil { + return nil, err + } + + // To protect against caller modifying the data argument, remember the data + // copied to the plain server frame. + pm.data = frameData[len(frameData)-len(data):] + return pm, nil +} + +func (pm *PreparedMessage) frame(key prepareKey) (int, []byte, error) { + pm.mu.Lock() + frame, ok := pm.frames[key] + if !ok { + frame = &preparedFrame{} + pm.frames[key] = frame + } + pm.mu.Unlock() + + var err error + frame.once.Do(func() { + // Prepare a frame using a 'fake' connection. + // TODO: Refactor code in conn.go to allow more direct construction of + // the frame. 
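+		// The message is framed by writing it through a throwaway Conn whose
+		// underlying net.Conn (prepareConn, below) captures the wire bytes in
+		// a buffer; sync.Once ensures this runs at most once per option set.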
+	mu := make(chan struct{}, 1)
+	mu <- struct{}{}
+	var nc prepareConn
+	c := &Conn{
+		conn:                   &nc,
+		mu:                     mu,
+		isServer:               key.isServer,
+		compressionLevel:       key.compressionLevel,
+		enableWriteCompression: true,
+		writeBuf:               make([]byte, defaultWriteBufferSize+maxFrameHeaderSize),
+	}
+	if key.compress {
+		c.newCompressionWriter = compressNoContextTakeover
+	}
+	err = c.WriteMessage(pm.messageType, pm.data)
+	frame.data = nc.buf.Bytes()
+	})
+	return pm.messageType, frame.data, err
+}
+
+type prepareConn struct {
+	buf bytes.Buffer
+	net.Conn
+}
+
+func (pc *prepareConn) Write(p []byte) (int, error)        { return pc.buf.Write(p) }
+func (pc *prepareConn) SetWriteDeadline(t time.Time) error { return nil }
diff --git a/vendor/github.com/gorilla/websocket/proxy.go b/vendor/github.com/gorilla/websocket/proxy.go
new file mode 100644
index 00000000..e0f466b7
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/proxy.go
@@ -0,0 +1,77 @@
+// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+	"bufio"
+	"encoding/base64"
+	"errors"
+	"net"
+	"net/http"
+	"net/url"
+	"strings"
+)
+
+type netDialerFunc func(network, addr string) (net.Conn, error)
+
+func (fn netDialerFunc) Dial(network, addr string) (net.Conn, error) {
+	return fn(network, addr)
+}
+
+func init() {
+	proxy_RegisterDialerType("http", func(proxyURL *url.URL, forwardDialer proxy_Dialer) (proxy_Dialer, error) {
+		return &httpProxyDialer{proxyURL: proxyURL, forwardDial: forwardDialer.Dial}, nil
+	})
+}
+
+type httpProxyDialer struct {
+	proxyURL    *url.URL
+	forwardDial func(network, addr string) (net.Conn, error)
+}
+
+func (hpd *httpProxyDialer) Dial(network string, addr string) (net.Conn, error) {
+	hostPort, _ := hostPortNoPort(hpd.proxyURL)
+	conn, err := hpd.forwardDial(network, hostPort)
+	if err != nil {
+		return nil, err
+	}
+
+	connectHeader := make(http.Header)
+	if user := hpd.proxyURL.User; user != nil {
+		proxyUser := user.Username()
+		if proxyPassword, passwordSet := user.Password(); passwordSet {
+			credential := base64.StdEncoding.EncodeToString([]byte(proxyUser + ":" + proxyPassword))
+			connectHeader.Set("Proxy-Authorization", "Basic "+credential)
+		}
+	}
+
+	connectReq := &http.Request{
+		Method: http.MethodConnect,
+		URL:    &url.URL{Opaque: addr},
+		Host:   addr,
+		Header: connectHeader,
+	}
+
+	if err := connectReq.Write(conn); err != nil {
+		conn.Close()
+		return nil, err
+	}
+
+	// Read response. It's OK to use and discard buffered reader here because
+	// the remote server does not speak until spoken to.
+	br := bufio.NewReader(conn)
+	resp, err := http.ReadResponse(br, connectReq)
+	if err != nil {
+		conn.Close()
+		return nil, err
+	}
+
+	if resp.StatusCode != 200 {
+		conn.Close()
+		f := strings.SplitN(resp.Status, " ", 2)
+		return nil, errors.New(f[1])
+	}
+	return conn, nil
+}
diff --git a/vendor/github.com/gorilla/websocket/server.go b/vendor/github.com/gorilla/websocket/server.go
new file mode 100644
index 00000000..24d53b38
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/server.go
@@ -0,0 +1,365 @@
+// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+	"bufio"
+	"errors"
+	"io"
+	"net/http"
+	"net/url"
+	"strings"
+	"time"
+)
+
+// HandshakeError describes an error with the handshake from the peer.
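+// Applications can detect it with a type assertion on the error returned by
+// Upgrade.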
+type HandshakeError struct { + message string +} + +func (e HandshakeError) Error() string { return e.message } + +// Upgrader specifies parameters for upgrading an HTTP connection to a +// WebSocket connection. +// +// It is safe to call Upgrader's methods concurrently. +type Upgrader struct { + // HandshakeTimeout specifies the duration for the handshake to complete. + HandshakeTimeout time.Duration + + // ReadBufferSize and WriteBufferSize specify I/O buffer sizes in bytes. If a buffer + // size is zero, then buffers allocated by the HTTP server are used. The + // I/O buffer sizes do not limit the size of the messages that can be sent + // or received. + ReadBufferSize, WriteBufferSize int + + // WriteBufferPool is a pool of buffers for write operations. If the value + // is not set, then write buffers are allocated to the connection for the + // lifetime of the connection. + // + // A pool is most useful when the application has a modest volume of writes + // across a large number of connections. + // + // Applications should use a single pool for each unique value of + // WriteBufferSize. + WriteBufferPool BufferPool + + // Subprotocols specifies the server's supported protocols in order of + // preference. If this field is not nil, then the Upgrade method negotiates a + // subprotocol by selecting the first match in this list with a protocol + // requested by the client. If there's no match, then no protocol is + // negotiated (the Sec-Websocket-Protocol header is not included in the + // handshake response). + Subprotocols []string + + // Error specifies the function for generating HTTP error responses. If Error + // is nil, then http.Error is used to generate the HTTP response. + Error func(w http.ResponseWriter, r *http.Request, status int, reason error) + + // CheckOrigin returns true if the request Origin header is acceptable. If + // CheckOrigin is nil, then a safe default is used: return false if the + // Origin request header is present and the origin host is not equal to + // request Host header. + // + // A CheckOrigin function should carefully validate the request origin to + // prevent cross-site request forgery. + CheckOrigin func(r *http.Request) bool + + // EnableCompression specify if the server should attempt to negotiate per + // message compression (RFC 7692). Setting this value to true does not + // guarantee that compression will be supported. Currently only "no context + // takeover" modes are supported. + EnableCompression bool +} + +func (u *Upgrader) returnError(w http.ResponseWriter, r *http.Request, status int, reason string) (*Conn, error) { + err := HandshakeError{reason} + if u.Error != nil { + u.Error(w, r, status, err) + } else { + w.Header().Set("Sec-Websocket-Version", "13") + http.Error(w, http.StatusText(status), status) + } + return nil, err +} + +// checkSameOrigin returns true if the origin is not set or is equal to the request host. 
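+// Hosts are compared with ASCII case folding (see equalASCIIFold).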
+func checkSameOrigin(r *http.Request) bool { + origin := r.Header["Origin"] + if len(origin) == 0 { + return true + } + u, err := url.Parse(origin[0]) + if err != nil { + return false + } + return equalASCIIFold(u.Host, r.Host) +} + +func (u *Upgrader) selectSubprotocol(r *http.Request, responseHeader http.Header) string { + if u.Subprotocols != nil { + clientProtocols := Subprotocols(r) + for _, serverProtocol := range u.Subprotocols { + for _, clientProtocol := range clientProtocols { + if clientProtocol == serverProtocol { + return clientProtocol + } + } + } + } else if responseHeader != nil { + return responseHeader.Get("Sec-Websocket-Protocol") + } + return "" +} + +// Upgrade upgrades the HTTP server connection to the WebSocket protocol. +// +// The responseHeader is included in the response to the client's upgrade +// request. Use the responseHeader to specify cookies (Set-Cookie). To specify +// subprotocols supported by the server, set Upgrader.Subprotocols directly. +// +// If the upgrade fails, then Upgrade replies to the client with an HTTP error +// response. +func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header) (*Conn, error) { + const badHandshake = "websocket: the client is not using the websocket protocol: " + + if !tokenListContainsValue(r.Header, "Connection", "upgrade") { + return u.returnError(w, r, http.StatusBadRequest, badHandshake+"'upgrade' token not found in 'Connection' header") + } + + if !tokenListContainsValue(r.Header, "Upgrade", "websocket") { + return u.returnError(w, r, http.StatusBadRequest, badHandshake+"'websocket' token not found in 'Upgrade' header") + } + + if r.Method != http.MethodGet { + return u.returnError(w, r, http.StatusMethodNotAllowed, badHandshake+"request method is not GET") + } + + if !tokenListContainsValue(r.Header, "Sec-Websocket-Version", "13") { + return u.returnError(w, r, http.StatusBadRequest, "websocket: unsupported version: 13 not found in 'Sec-Websocket-Version' header") + } + + if _, ok := responseHeader["Sec-Websocket-Extensions"]; ok { + return u.returnError(w, r, http.StatusInternalServerError, "websocket: application specific 'Sec-WebSocket-Extensions' headers are unsupported") + } + + checkOrigin := u.CheckOrigin + if checkOrigin == nil { + checkOrigin = checkSameOrigin + } + if !checkOrigin(r) { + return u.returnError(w, r, http.StatusForbidden, "websocket: request origin not allowed by Upgrader.CheckOrigin") + } + + challengeKey := r.Header.Get("Sec-Websocket-Key") + if challengeKey == "" { + return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: 'Sec-WebSocket-Key' header is missing or blank") + } + + subprotocol := u.selectSubprotocol(r, responseHeader) + + // Negotiate PMCE + var compress bool + if u.EnableCompression { + for _, ext := range parseExtensions(r.Header) { + if ext[""] != "permessage-deflate" { + continue + } + compress = true + break + } + } + + h, ok := w.(http.Hijacker) + if !ok { + return u.returnError(w, r, http.StatusInternalServerError, "websocket: response does not implement http.Hijacker") + } + var brw *bufio.ReadWriter + netConn, brw, err := h.Hijack() + if err != nil { + return u.returnError(w, r, http.StatusInternalServerError, err.Error()) + } + + if brw.Reader.Buffered() > 0 { + netConn.Close() + return nil, errors.New("websocket: client sent data before handshake is complete") + } + + var br *bufio.Reader + if u.ReadBufferSize == 0 && bufioReaderSize(netConn, brw.Reader) > 256 { + // Reuse hijacked buffered 
reader as connection reader. + br = brw.Reader + } + + buf := bufioWriterBuffer(netConn, brw.Writer) + + var writeBuf []byte + if u.WriteBufferPool == nil && u.WriteBufferSize == 0 && len(buf) >= maxFrameHeaderSize+256 { + // Reuse hijacked write buffer as connection buffer. + writeBuf = buf + } + + c := newConn(netConn, true, u.ReadBufferSize, u.WriteBufferSize, u.WriteBufferPool, br, writeBuf) + c.subprotocol = subprotocol + + if compress { + c.newCompressionWriter = compressNoContextTakeover + c.newDecompressionReader = decompressNoContextTakeover + } + + // Use larger of hijacked buffer and connection write buffer for header. + p := buf + if len(c.writeBuf) > len(p) { + p = c.writeBuf + } + p = p[:0] + + p = append(p, "HTTP/1.1 101 Switching Protocols\r\nUpgrade: websocket\r\nConnection: Upgrade\r\nSec-WebSocket-Accept: "...) + p = append(p, computeAcceptKey(challengeKey)...) + p = append(p, "\r\n"...) + if c.subprotocol != "" { + p = append(p, "Sec-WebSocket-Protocol: "...) + p = append(p, c.subprotocol...) + p = append(p, "\r\n"...) + } + if compress { + p = append(p, "Sec-WebSocket-Extensions: permessage-deflate; server_no_context_takeover; client_no_context_takeover\r\n"...) + } + for k, vs := range responseHeader { + if k == "Sec-Websocket-Protocol" { + continue + } + for _, v := range vs { + p = append(p, k...) + p = append(p, ": "...) + for i := 0; i < len(v); i++ { + b := v[i] + if b <= 31 { + // prevent response splitting. + b = ' ' + } + p = append(p, b) + } + p = append(p, "\r\n"...) + } + } + p = append(p, "\r\n"...) + + // Clear deadlines set by HTTP server. + netConn.SetDeadline(time.Time{}) + + if u.HandshakeTimeout > 0 { + netConn.SetWriteDeadline(time.Now().Add(u.HandshakeTimeout)) + } + if _, err = netConn.Write(p); err != nil { + netConn.Close() + return nil, err + } + if u.HandshakeTimeout > 0 { + netConn.SetWriteDeadline(time.Time{}) + } + + return c, nil +} + +// Upgrade upgrades the HTTP server connection to the WebSocket protocol. +// +// Deprecated: Use websocket.Upgrader instead. +// +// Upgrade does not perform origin checking. The application is responsible for +// checking the Origin header before calling Upgrade. An example implementation +// of the same origin policy check is: +// +// if req.Header.Get("Origin") != "http://"+req.Host { +// http.Error(w, "Origin not allowed", http.StatusForbidden) +// return +// } +// +// If the endpoint supports subprotocols, then the application is responsible +// for negotiating the protocol used on the connection. Use the Subprotocols() +// function to get the subprotocols requested by the client. Use the +// Sec-Websocket-Protocol response header to specify the subprotocol selected +// by the application. +// +// The responseHeader is included in the response to the client's upgrade +// request. Use the responseHeader to specify cookies (Set-Cookie) and the +// negotiated subprotocol (Sec-Websocket-Protocol). +// +// The connection buffers IO to the underlying network connection. The +// readBufSize and writeBufSize parameters specify the size of the buffers to +// use. Messages can be larger than the buffers. +// +// If the request is not a valid WebSocket handshake, then Upgrade returns an +// error of type HandshakeError. Applications should handle this error by +// replying to the client with an HTTP error response. 
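+//
+// A minimal handler sketch using this deprecated function (the buffer sizes
+// here are illustrative):
+//
+//  conn, err := websocket.Upgrade(w, r, nil, 1024, 1024)
+//  if err != nil {
+//      http.Error(w, "handshake failed", http.StatusBadRequest)
+//      return
+//  }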
+func Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header, readBufSize, writeBufSize int) (*Conn, error) {
+	u := Upgrader{ReadBufferSize: readBufSize, WriteBufferSize: writeBufSize}
+	u.Error = func(w http.ResponseWriter, r *http.Request, status int, reason error) {
+		// don't return errors to maintain backwards compatibility
+	}
+	u.CheckOrigin = func(r *http.Request) bool {
+		// allow all connections by default
+		return true
+	}
+	return u.Upgrade(w, r, responseHeader)
+}
+
+// Subprotocols returns the subprotocols requested by the client in the
+// Sec-Websocket-Protocol header.
+func Subprotocols(r *http.Request) []string {
+	h := strings.TrimSpace(r.Header.Get("Sec-Websocket-Protocol"))
+	if h == "" {
+		return nil
+	}
+	protocols := strings.Split(h, ",")
+	for i := range protocols {
+		protocols[i] = strings.TrimSpace(protocols[i])
+	}
+	return protocols
+}
+
+// IsWebSocketUpgrade returns true if the client requested upgrade to the
+// WebSocket protocol.
+func IsWebSocketUpgrade(r *http.Request) bool {
+	return tokenListContainsValue(r.Header, "Connection", "upgrade") &&
+		tokenListContainsValue(r.Header, "Upgrade", "websocket")
+}
+
+// bufioReaderSize returns the size of a bufio.Reader.
+func bufioReaderSize(originalReader io.Reader, br *bufio.Reader) int {
+	// This code assumes that peek on a reset reader returns
+	// bufio.Reader.buf[:0].
+	// TODO: Use bufio.Reader.Size() after Go 1.10
+	br.Reset(originalReader)
+	if p, err := br.Peek(0); err == nil {
+		return cap(p)
+	}
+	return 0
+}
+
+// writeHook is an io.Writer that records the last slice passed to it via
+// io.Writer.Write.
+type writeHook struct {
+	p []byte
+}
+
+func (wh *writeHook) Write(p []byte) (int, error) {
+	wh.p = p
+	return len(p), nil
+}
+
+// bufioWriterBuffer grabs the buffer from a bufio.Writer.
+func bufioWriterBuffer(originalWriter io.Writer, bw *bufio.Writer) []byte {
+	// This code assumes that bufio.Writer.buf[:1] is passed to the
+	// bufio.Writer's underlying writer.
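+	// Resetting onto a writeHook and flushing a single byte makes the
+	// bufio.Writer pass its internal buffer to the hook, which records the
+	// slice; the slice is then re-sliced to its full capacity on return.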
+ var wh writeHook + bw.Reset(&wh) + bw.WriteByte(0) + bw.Flush() + + bw.Reset(originalWriter) + + return wh.p[:cap(wh.p)] +} diff --git a/vendor/github.com/gorilla/websocket/tls_handshake.go b/vendor/github.com/gorilla/websocket/tls_handshake.go new file mode 100644 index 00000000..a62b68cc --- /dev/null +++ b/vendor/github.com/gorilla/websocket/tls_handshake.go @@ -0,0 +1,21 @@ +//go:build go1.17 +// +build go1.17 + +package websocket + +import ( + "context" + "crypto/tls" +) + +func doHandshake(ctx context.Context, tlsConn *tls.Conn, cfg *tls.Config) error { + if err := tlsConn.HandshakeContext(ctx); err != nil { + return err + } + if !cfg.InsecureSkipVerify { + if err := tlsConn.VerifyHostname(cfg.ServerName); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/gorilla/websocket/tls_handshake_116.go b/vendor/github.com/gorilla/websocket/tls_handshake_116.go new file mode 100644 index 00000000..e1b2b44f --- /dev/null +++ b/vendor/github.com/gorilla/websocket/tls_handshake_116.go @@ -0,0 +1,21 @@ +//go:build !go1.17 +// +build !go1.17 + +package websocket + +import ( + "context" + "crypto/tls" +) + +func doHandshake(ctx context.Context, tlsConn *tls.Conn, cfg *tls.Config) error { + if err := tlsConn.Handshake(); err != nil { + return err + } + if !cfg.InsecureSkipVerify { + if err := tlsConn.VerifyHostname(cfg.ServerName); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/gorilla/websocket/util.go b/vendor/github.com/gorilla/websocket/util.go new file mode 100644 index 00000000..7bf2f66c --- /dev/null +++ b/vendor/github.com/gorilla/websocket/util.go @@ -0,0 +1,283 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "crypto/rand" + "crypto/sha1" + "encoding/base64" + "io" + "net/http" + "strings" + "unicode/utf8" +) + +var keyGUID = []byte("258EAFA5-E914-47DA-95CA-C5AB0DC85B11") + +func computeAcceptKey(challengeKey string) string { + h := sha1.New() + h.Write([]byte(challengeKey)) + h.Write(keyGUID) + return base64.StdEncoding.EncodeToString(h.Sum(nil)) +} + +func generateChallengeKey() (string, error) { + p := make([]byte, 16) + if _, err := io.ReadFull(rand.Reader, p); err != nil { + return "", err + } + return base64.StdEncoding.EncodeToString(p), nil +} + +// Token octets per RFC 2616. 
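+// A true entry marks an octet that may appear in a token; CTL and separator
+// characters are excluded.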
+var isTokenOctet = [256]bool{ + '!': true, + '#': true, + '$': true, + '%': true, + '&': true, + '\'': true, + '*': true, + '+': true, + '-': true, + '.': true, + '0': true, + '1': true, + '2': true, + '3': true, + '4': true, + '5': true, + '6': true, + '7': true, + '8': true, + '9': true, + 'A': true, + 'B': true, + 'C': true, + 'D': true, + 'E': true, + 'F': true, + 'G': true, + 'H': true, + 'I': true, + 'J': true, + 'K': true, + 'L': true, + 'M': true, + 'N': true, + 'O': true, + 'P': true, + 'Q': true, + 'R': true, + 'S': true, + 'T': true, + 'U': true, + 'W': true, + 'V': true, + 'X': true, + 'Y': true, + 'Z': true, + '^': true, + '_': true, + '`': true, + 'a': true, + 'b': true, + 'c': true, + 'd': true, + 'e': true, + 'f': true, + 'g': true, + 'h': true, + 'i': true, + 'j': true, + 'k': true, + 'l': true, + 'm': true, + 'n': true, + 'o': true, + 'p': true, + 'q': true, + 'r': true, + 's': true, + 't': true, + 'u': true, + 'v': true, + 'w': true, + 'x': true, + 'y': true, + 'z': true, + '|': true, + '~': true, +} + +// skipSpace returns a slice of the string s with all leading RFC 2616 linear +// whitespace removed. +func skipSpace(s string) (rest string) { + i := 0 + for ; i < len(s); i++ { + if b := s[i]; b != ' ' && b != '\t' { + break + } + } + return s[i:] +} + +// nextToken returns the leading RFC 2616 token of s and the string following +// the token. +func nextToken(s string) (token, rest string) { + i := 0 + for ; i < len(s); i++ { + if !isTokenOctet[s[i]] { + break + } + } + return s[:i], s[i:] +} + +// nextTokenOrQuoted returns the leading token or quoted string per RFC 2616 +// and the string following the token or quoted string. +func nextTokenOrQuoted(s string) (value string, rest string) { + if !strings.HasPrefix(s, "\"") { + return nextToken(s) + } + s = s[1:] + for i := 0; i < len(s); i++ { + switch s[i] { + case '"': + return s[:i], s[i+1:] + case '\\': + p := make([]byte, len(s)-1) + j := copy(p, s[:i]) + escape := true + for i = i + 1; i < len(s); i++ { + b := s[i] + switch { + case escape: + escape = false + p[j] = b + j++ + case b == '\\': + escape = true + case b == '"': + return string(p[:j]), s[i+1:] + default: + p[j] = b + j++ + } + } + return "", "" + } + } + return "", "" +} + +// equalASCIIFold returns true if s is equal to t with ASCII case folding as +// defined in RFC 4790. +func equalASCIIFold(s, t string) bool { + for s != "" && t != "" { + sr, size := utf8.DecodeRuneInString(s) + s = s[size:] + tr, size := utf8.DecodeRuneInString(t) + t = t[size:] + if sr == tr { + continue + } + if 'A' <= sr && sr <= 'Z' { + sr = sr + 'a' - 'A' + } + if 'A' <= tr && tr <= 'Z' { + tr = tr + 'a' - 'A' + } + if sr != tr { + return false + } + } + return s == t +} + +// tokenListContainsValue returns true if the 1#token header with the given +// name contains a token equal to value with ASCII case folding. +func tokenListContainsValue(header http.Header, name string, value string) bool { +headers: + for _, s := range header[name] { + for { + var t string + t, s = nextToken(skipSpace(s)) + if t == "" { + continue headers + } + s = skipSpace(s) + if s != "" && s[0] != ',' { + continue headers + } + if equalASCIIFold(t, value) { + return true + } + if s == "" { + continue headers + } + s = s[1:] + } + } + return false +} + +// parseExtensions parses WebSocket extensions from a header. 
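+// Each extension is returned as a map whose "" key holds the extension token;
+// for example, "permessage-deflate; server_no_context_takeover" parses to
+// {"": "permessage-deflate", "server_no_context_takeover": ""}.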
+func parseExtensions(header http.Header) []map[string]string { + // From RFC 6455: + // + // Sec-WebSocket-Extensions = extension-list + // extension-list = 1#extension + // extension = extension-token *( ";" extension-param ) + // extension-token = registered-token + // registered-token = token + // extension-param = token [ "=" (token | quoted-string) ] + // ;When using the quoted-string syntax variant, the value + // ;after quoted-string unescaping MUST conform to the + // ;'token' ABNF. + + var result []map[string]string +headers: + for _, s := range header["Sec-Websocket-Extensions"] { + for { + var t string + t, s = nextToken(skipSpace(s)) + if t == "" { + continue headers + } + ext := map[string]string{"": t} + for { + s = skipSpace(s) + if !strings.HasPrefix(s, ";") { + break + } + var k string + k, s = nextToken(skipSpace(s[1:])) + if k == "" { + continue headers + } + s = skipSpace(s) + var v string + if strings.HasPrefix(s, "=") { + v, s = nextTokenOrQuoted(skipSpace(s[1:])) + s = skipSpace(s) + } + if s != "" && s[0] != ',' && s[0] != ';' { + continue headers + } + ext[k] = v + } + if s != "" && s[0] != ',' { + continue headers + } + result = append(result, ext) + if s == "" { + continue headers + } + s = s[1:] + } + } + return result +} diff --git a/vendor/github.com/gorilla/websocket/x_net_proxy.go b/vendor/github.com/gorilla/websocket/x_net_proxy.go new file mode 100644 index 00000000..2e668f6b --- /dev/null +++ b/vendor/github.com/gorilla/websocket/x_net_proxy.go @@ -0,0 +1,473 @@ +// Code generated by golang.org/x/tools/cmd/bundle. DO NOT EDIT. +//go:generate bundle -o x_net_proxy.go golang.org/x/net/proxy + +// Package proxy provides support for a variety of protocols to proxy network +// data. +// + +package websocket + +import ( + "errors" + "io" + "net" + "net/url" + "os" + "strconv" + "strings" + "sync" +) + +type proxy_direct struct{} + +// Direct is a direct proxy: one that makes network connections directly. +var proxy_Direct = proxy_direct{} + +func (proxy_direct) Dial(network, addr string) (net.Conn, error) { + return net.Dial(network, addr) +} + +// A PerHost directs connections to a default Dialer unless the host name +// requested matches one of a number of exceptions. +type proxy_PerHost struct { + def, bypass proxy_Dialer + + bypassNetworks []*net.IPNet + bypassIPs []net.IP + bypassZones []string + bypassHosts []string +} + +// NewPerHost returns a PerHost Dialer that directs connections to either +// defaultDialer or bypass, depending on whether the connection matches one of +// the configured rules. +func proxy_NewPerHost(defaultDialer, bypass proxy_Dialer) *proxy_PerHost { + return &proxy_PerHost{ + def: defaultDialer, + bypass: bypass, + } +} + +// Dial connects to the address addr on the given network through either +// defaultDialer or bypass. 
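+// Bypass rules are evaluated by dialerForRequest below: IP and CIDR rules
+// match only literal IP addresses, while zone and host rules match names.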
+func (p *proxy_PerHost) Dial(network, addr string) (c net.Conn, err error) { + host, _, err := net.SplitHostPort(addr) + if err != nil { + return nil, err + } + + return p.dialerForRequest(host).Dial(network, addr) +} + +func (p *proxy_PerHost) dialerForRequest(host string) proxy_Dialer { + if ip := net.ParseIP(host); ip != nil { + for _, net := range p.bypassNetworks { + if net.Contains(ip) { + return p.bypass + } + } + for _, bypassIP := range p.bypassIPs { + if bypassIP.Equal(ip) { + return p.bypass + } + } + return p.def + } + + for _, zone := range p.bypassZones { + if strings.HasSuffix(host, zone) { + return p.bypass + } + if host == zone[1:] { + // For a zone ".example.com", we match "example.com" + // too. + return p.bypass + } + } + for _, bypassHost := range p.bypassHosts { + if bypassHost == host { + return p.bypass + } + } + return p.def +} + +// AddFromString parses a string that contains comma-separated values +// specifying hosts that should use the bypass proxy. Each value is either an +// IP address, a CIDR range, a zone (*.example.com) or a host name +// (localhost). A best effort is made to parse the string and errors are +// ignored. +func (p *proxy_PerHost) AddFromString(s string) { + hosts := strings.Split(s, ",") + for _, host := range hosts { + host = strings.TrimSpace(host) + if len(host) == 0 { + continue + } + if strings.Contains(host, "/") { + // We assume that it's a CIDR address like 127.0.0.0/8 + if _, net, err := net.ParseCIDR(host); err == nil { + p.AddNetwork(net) + } + continue + } + if ip := net.ParseIP(host); ip != nil { + p.AddIP(ip) + continue + } + if strings.HasPrefix(host, "*.") { + p.AddZone(host[1:]) + continue + } + p.AddHost(host) + } +} + +// AddIP specifies an IP address that will use the bypass proxy. Note that +// this will only take effect if a literal IP address is dialed. A connection +// to a named host will never match an IP. +func (p *proxy_PerHost) AddIP(ip net.IP) { + p.bypassIPs = append(p.bypassIPs, ip) +} + +// AddNetwork specifies an IP range that will use the bypass proxy. Note that +// this will only take effect if a literal IP address is dialed. A connection +// to a named host will never match. +func (p *proxy_PerHost) AddNetwork(net *net.IPNet) { + p.bypassNetworks = append(p.bypassNetworks, net) +} + +// AddZone specifies a DNS suffix that will use the bypass proxy. A zone of +// "example.com" matches "example.com" and all of its subdomains. +func (p *proxy_PerHost) AddZone(zone string) { + if strings.HasSuffix(zone, ".") { + zone = zone[:len(zone)-1] + } + if !strings.HasPrefix(zone, ".") { + zone = "." + zone + } + p.bypassZones = append(p.bypassZones, zone) +} + +// AddHost specifies a host name that will use the bypass proxy. +func (p *proxy_PerHost) AddHost(host string) { + if strings.HasSuffix(host, ".") { + host = host[:len(host)-1] + } + p.bypassHosts = append(p.bypassHosts, host) +} + +// A Dialer is a means to establish a connection. +type proxy_Dialer interface { + // Dial connects to the given address via the proxy. + Dial(network, addr string) (c net.Conn, err error) +} + +// Auth contains authentication parameters that specific Dialers may require. +type proxy_Auth struct { + User, Password string +} + +// FromEnvironment returns the dialer specified by the proxy related variables in +// the environment. 
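+// ALL_PROXY (or all_proxy) selects the proxy URL, and NO_PROXY (or no_proxy)
+// lists bypass rules parsed by AddFromString.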
+func proxy_FromEnvironment() proxy_Dialer { + allProxy := proxy_allProxyEnv.Get() + if len(allProxy) == 0 { + return proxy_Direct + } + + proxyURL, err := url.Parse(allProxy) + if err != nil { + return proxy_Direct + } + proxy, err := proxy_FromURL(proxyURL, proxy_Direct) + if err != nil { + return proxy_Direct + } + + noProxy := proxy_noProxyEnv.Get() + if len(noProxy) == 0 { + return proxy + } + + perHost := proxy_NewPerHost(proxy, proxy_Direct) + perHost.AddFromString(noProxy) + return perHost +} + +// proxySchemes is a map from URL schemes to a function that creates a Dialer +// from a URL with such a scheme. +var proxy_proxySchemes map[string]func(*url.URL, proxy_Dialer) (proxy_Dialer, error) + +// RegisterDialerType takes a URL scheme and a function to generate Dialers from +// a URL with that scheme and a forwarding Dialer. Registered schemes are used +// by FromURL. +func proxy_RegisterDialerType(scheme string, f func(*url.URL, proxy_Dialer) (proxy_Dialer, error)) { + if proxy_proxySchemes == nil { + proxy_proxySchemes = make(map[string]func(*url.URL, proxy_Dialer) (proxy_Dialer, error)) + } + proxy_proxySchemes[scheme] = f +} + +// FromURL returns a Dialer given a URL specification and an underlying +// Dialer for it to make network requests. +func proxy_FromURL(u *url.URL, forward proxy_Dialer) (proxy_Dialer, error) { + var auth *proxy_Auth + if u.User != nil { + auth = new(proxy_Auth) + auth.User = u.User.Username() + if p, ok := u.User.Password(); ok { + auth.Password = p + } + } + + switch u.Scheme { + case "socks5": + return proxy_SOCKS5("tcp", u.Host, auth, forward) + } + + // If the scheme doesn't match any of the built-in schemes, see if it + // was registered by another package. + if proxy_proxySchemes != nil { + if f, ok := proxy_proxySchemes[u.Scheme]; ok { + return f(u, forward) + } + } + + return nil, errors.New("proxy: unknown scheme: " + u.Scheme) +} + +var ( + proxy_allProxyEnv = &proxy_envOnce{ + names: []string{"ALL_PROXY", "all_proxy"}, + } + proxy_noProxyEnv = &proxy_envOnce{ + names: []string{"NO_PROXY", "no_proxy"}, + } +) + +// envOnce looks up an environment variable (optionally by multiple +// names) once. It mitigates expensive lookups on some platforms +// (e.g. Windows). +// (Borrowed from net/http/transport.go) +type proxy_envOnce struct { + names []string + once sync.Once + val string +} + +func (e *proxy_envOnce) Get() string { + e.once.Do(e.init) + return e.val +} + +func (e *proxy_envOnce) init() { + for _, n := range e.names { + e.val = os.Getenv(n) + if e.val != "" { + return + } + } +} + +// SOCKS5 returns a Dialer that makes SOCKSv5 connections to the given address +// with an optional username and password. See RFC 1928 and RFC 1929. 
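+// The handshake with the proxy server is not performed here; it happens on
+// each call to the returned Dialer's Dial method.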
+func proxy_SOCKS5(network, addr string, auth *proxy_Auth, forward proxy_Dialer) (proxy_Dialer, error) { + s := &proxy_socks5{ + network: network, + addr: addr, + forward: forward, + } + if auth != nil { + s.user = auth.User + s.password = auth.Password + } + + return s, nil +} + +type proxy_socks5 struct { + user, password string + network, addr string + forward proxy_Dialer +} + +const proxy_socks5Version = 5 + +const ( + proxy_socks5AuthNone = 0 + proxy_socks5AuthPassword = 2 +) + +const proxy_socks5Connect = 1 + +const ( + proxy_socks5IP4 = 1 + proxy_socks5Domain = 3 + proxy_socks5IP6 = 4 +) + +var proxy_socks5Errors = []string{ + "", + "general failure", + "connection forbidden", + "network unreachable", + "host unreachable", + "connection refused", + "TTL expired", + "command not supported", + "address type not supported", +} + +// Dial connects to the address addr on the given network via the SOCKS5 proxy. +func (s *proxy_socks5) Dial(network, addr string) (net.Conn, error) { + switch network { + case "tcp", "tcp6", "tcp4": + default: + return nil, errors.New("proxy: no support for SOCKS5 proxy connections of type " + network) + } + + conn, err := s.forward.Dial(s.network, s.addr) + if err != nil { + return nil, err + } + if err := s.connect(conn, addr); err != nil { + conn.Close() + return nil, err + } + return conn, nil +} + +// connect takes an existing connection to a socks5 proxy server, +// and commands the server to extend that connection to target, +// which must be a canonical address with a host and port. +func (s *proxy_socks5) connect(conn net.Conn, target string) error { + host, portStr, err := net.SplitHostPort(target) + if err != nil { + return err + } + + port, err := strconv.Atoi(portStr) + if err != nil { + return errors.New("proxy: failed to parse port number: " + portStr) + } + if port < 1 || port > 0xffff { + return errors.New("proxy: port number out of range: " + portStr) + } + + // the size here is just an estimate + buf := make([]byte, 0, 6+len(host)) + + buf = append(buf, proxy_socks5Version) + if len(s.user) > 0 && len(s.user) < 256 && len(s.password) < 256 { + buf = append(buf, 2 /* num auth methods */, proxy_socks5AuthNone, proxy_socks5AuthPassword) + } else { + buf = append(buf, 1 /* num auth methods */, proxy_socks5AuthNone) + } + + if _, err := conn.Write(buf); err != nil { + return errors.New("proxy: failed to write greeting to SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + if _, err := io.ReadFull(conn, buf[:2]); err != nil { + return errors.New("proxy: failed to read greeting from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + if buf[0] != 5 { + return errors.New("proxy: SOCKS5 proxy at " + s.addr + " has unexpected version " + strconv.Itoa(int(buf[0]))) + } + if buf[1] == 0xff { + return errors.New("proxy: SOCKS5 proxy at " + s.addr + " requires authentication") + } + + // See RFC 1929 + if buf[1] == proxy_socks5AuthPassword { + buf = buf[:0] + buf = append(buf, 1 /* password protocol version */) + buf = append(buf, uint8(len(s.user))) + buf = append(buf, s.user...) + buf = append(buf, uint8(len(s.password))) + buf = append(buf, s.password...) 
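+	// The request just built follows RFC 1929: a version octet, then a
+	// length-prefixed username and a length-prefixed password.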
+ + if _, err := conn.Write(buf); err != nil { + return errors.New("proxy: failed to write authentication request to SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + if _, err := io.ReadFull(conn, buf[:2]); err != nil { + return errors.New("proxy: failed to read authentication reply from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + if buf[1] != 0 { + return errors.New("proxy: SOCKS5 proxy at " + s.addr + " rejected username/password") + } + } + + buf = buf[:0] + buf = append(buf, proxy_socks5Version, proxy_socks5Connect, 0 /* reserved */) + + if ip := net.ParseIP(host); ip != nil { + if ip4 := ip.To4(); ip4 != nil { + buf = append(buf, proxy_socks5IP4) + ip = ip4 + } else { + buf = append(buf, proxy_socks5IP6) + } + buf = append(buf, ip...) + } else { + if len(host) > 255 { + return errors.New("proxy: destination host name too long: " + host) + } + buf = append(buf, proxy_socks5Domain) + buf = append(buf, byte(len(host))) + buf = append(buf, host...) + } + buf = append(buf, byte(port>>8), byte(port)) + + if _, err := conn.Write(buf); err != nil { + return errors.New("proxy: failed to write connect request to SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + if _, err := io.ReadFull(conn, buf[:4]); err != nil { + return errors.New("proxy: failed to read connect reply from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + failure := "unknown error" + if int(buf[1]) < len(proxy_socks5Errors) { + failure = proxy_socks5Errors[buf[1]] + } + + if len(failure) > 0 { + return errors.New("proxy: SOCKS5 proxy at " + s.addr + " failed to connect: " + failure) + } + + bytesToDiscard := 0 + switch buf[3] { + case proxy_socks5IP4: + bytesToDiscard = net.IPv4len + case proxy_socks5IP6: + bytesToDiscard = net.IPv6len + case proxy_socks5Domain: + _, err := io.ReadFull(conn, buf[:1]) + if err != nil { + return errors.New("proxy: failed to read domain length from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + bytesToDiscard = int(buf[0]) + default: + return errors.New("proxy: got unknown address type " + strconv.Itoa(int(buf[3])) + " from SOCKS5 proxy at " + s.addr) + } + + if cap(buf) < bytesToDiscard { + buf = make([]byte, bytesToDiscard) + } else { + buf = buf[:bytesToDiscard] + } + if _, err := io.ReadFull(conn, buf); err != nil { + return errors.New("proxy: failed to read address from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + // Also need to discard the port number + if _, err := io.ReadFull(conn, buf[:2]); err != nil { + return errors.New("proxy: failed to read port from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + return nil +} diff --git a/vendor/github.com/hashicorp/go-version/CHANGELOG.md b/vendor/github.com/hashicorp/go-version/CHANGELOG.md new file mode 100644 index 00000000..5f16dd14 --- /dev/null +++ b/vendor/github.com/hashicorp/go-version/CHANGELOG.md @@ -0,0 +1,45 @@ +# 1.6.0 (June 28, 2022) + +FEATURES: + +- Add `Prerelease` function to `Constraint` to return true if the version includes a prerelease field ([#100](https://github.com/hashicorp/go-version/pull/100)) + +# 1.5.0 (May 18, 2022) + +FEATURES: + +- Use `encoding` `TextMarshaler` & `TextUnmarshaler` instead of JSON equivalents ([#95](https://github.com/hashicorp/go-version/pull/95)) +- Add JSON handlers to allow parsing from/to JSON ([#93](https://github.com/hashicorp/go-version/pull/93)) + +# 1.4.0 (January 5, 2022) + +FEATURES: + + - Introduce `MustConstraints()` ([#87](https://github.com/hashicorp/go-version/pull/87)) + - `Constraints`: Introduce `Equals()` and 
`sort.Interface` methods ([#88](https://github.com/hashicorp/go-version/pull/88)) + +# 1.3.0 (March 31, 2021) + +Please note that CHANGELOG.md does not exist in the source code prior to this release. + +FEATURES: + - Add `Core` function to return a version without prerelease or metadata ([#85](https://github.com/hashicorp/go-version/pull/85)) + +# 1.2.1 (June 17, 2020) + +BUG FIXES: + - Prevent `Version.Equal` method from panicking on `nil` encounter ([#73](https://github.com/hashicorp/go-version/pull/73)) + +# 1.2.0 (April 23, 2019) + +FEATURES: + - Add `GreaterThanOrEqual` and `LessThanOrEqual` helper methods ([#53](https://github.com/hashicorp/go-version/pull/53)) + +# 1.1.0 (Jan 07, 2019) + +FEATURES: + - Add `NewSemver` constructor ([#45](https://github.com/hashicorp/go-version/pull/45)) + +# 1.0.0 (August 24, 2018) + +Initial release. diff --git a/vendor/github.com/hashicorp/go-version/LICENSE b/vendor/github.com/hashicorp/go-version/LICENSE new file mode 100644 index 00000000..c33dcc7c --- /dev/null +++ b/vendor/github.com/hashicorp/go-version/LICENSE @@ -0,0 +1,354 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. 
“Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. 
You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. 
The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. 
Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/go-version/README.md b/vendor/github.com/hashicorp/go-version/README.md new file mode 100644 index 00000000..4d250509 --- /dev/null +++ b/vendor/github.com/hashicorp/go-version/README.md @@ -0,0 +1,66 @@ +# Versioning Library for Go +[![Build Status](https://circleci.com/gh/hashicorp/go-version/tree/main.svg?style=svg)](https://circleci.com/gh/hashicorp/go-version/tree/main) +[![GoDoc](https://godoc.org/github.com/hashicorp/go-version?status.svg)](https://godoc.org/github.com/hashicorp/go-version) + +go-version is a library for parsing versions and version constraints, +and verifying versions against a set of constraints. go-version +can sort a collection of versions properly, handles prerelease/beta +versions, can increment versions, etc. + +Versions used with go-version must follow [SemVer](http://semver.org/). + +## Installation and Usage + +Package documentation can be found on +[GoDoc](http://godoc.org/github.com/hashicorp/go-version). 
+ +Installation can be done with a normal `go get`: + +``` +$ go get github.com/hashicorp/go-version +``` + +#### Version Parsing and Comparison + +```go +v1, err := version.NewVersion("1.2") +v2, err := version.NewVersion("1.5+metadata") + +// Comparison example. There is also GreaterThan, Equal, and just +// a simple Compare that returns an int allowing easy >=, <=, etc. +if v1.LessThan(v2) { + fmt.Printf("%s is less than %s", v1, v2) +} +``` + +#### Version Constraints + +```go +v1, err := version.NewVersion("1.2") + +// Constraints example. +constraints, err := version.NewConstraint(">= 1.0, < 1.4") +if constraints.Check(v1) { + fmt.Printf("%s satisfies constraints %s", v1, constraints) +} +``` + +#### Version Sorting + +```go +versionsRaw := []string{"1.1", "0.7.1", "1.4-beta", "1.4", "2"} +versions := make([]*version.Version, len(versionsRaw)) +for i, raw := range versionsRaw { + v, _ := version.NewVersion(raw) + versions[i] = v +} + +// After this, the versions are properly sorted +sort.Sort(version.Collection(versions)) +``` + +## Issues and Contributing + +If you find an issue with this library, please report an issue. If you'd +like, we welcome any contributions. Fork this library and submit a pull +request. diff --git a/vendor/github.com/hashicorp/go-version/constraint.go b/vendor/github.com/hashicorp/go-version/constraint.go new file mode 100644 index 00000000..da5d1aca --- /dev/null +++ b/vendor/github.com/hashicorp/go-version/constraint.go @@ -0,0 +1,296 @@ +package version + +import ( + "fmt" + "reflect" + "regexp" + "sort" + "strings" +) + +// Constraint represents a single constraint for a version, such as +// ">= 1.0". +type Constraint struct { + f constraintFunc + op operator + check *Version + original string +} + +func (c *Constraint) Equals(con *Constraint) bool { + return c.op == con.op && c.check.Equal(con.check) +} + +// Constraints is a slice of constraints. We make a custom type so that +// we can add methods to it. +type Constraints []*Constraint + +type constraintFunc func(v, c *Version) bool + +var constraintOperators map[string]constraintOperation + +type constraintOperation struct { + op operator + f constraintFunc +} + +var constraintRegexp *regexp.Regexp + +func init() { + constraintOperators = map[string]constraintOperation{ + "": {op: equal, f: constraintEqual}, + "=": {op: equal, f: constraintEqual}, + "!=": {op: notEqual, f: constraintNotEqual}, + ">": {op: greaterThan, f: constraintGreaterThan}, + "<": {op: lessThan, f: constraintLessThan}, + ">=": {op: greaterThanEqual, f: constraintGreaterThanEqual}, + "<=": {op: lessThanEqual, f: constraintLessThanEqual}, + "~>": {op: pessimistic, f: constraintPessimistic}, + } + + ops := make([]string, 0, len(constraintOperators)) + for k := range constraintOperators { + ops = append(ops, regexp.QuoteMeta(k)) + } + + constraintRegexp = regexp.MustCompile(fmt.Sprintf( + `^\s*(%s)\s*(%s)\s*$`, + strings.Join(ops, "|"), + VersionRegexpRaw)) +} + +// NewConstraint will parse one or more constraints from the given +// constraint string. The string must be a comma-separated list of +// constraints. +func NewConstraint(v string) (Constraints, error) { + vs := strings.Split(v, ",") + result := make([]*Constraint, len(vs)) + for i, single := range vs { + c, err := parseSingle(single) + if err != nil { + return nil, err + } + + result[i] = c + } + + return Constraints(result), nil +} + +// MustConstraints is a helper that wraps a call to a function +// returning (Constraints, error) and panics if error is non-nil. 
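+//
+// A minimal sketch (the constraint string is just an example):
+//
+//	cs := MustConstraints(NewConstraint(">= 1.0, < 1.4"))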
+func MustConstraints(c Constraints, err error) Constraints {
+    if err != nil {
+        panic(err)
+    }
+
+    return c
+}
+
+// Check tests if a version satisfies all the constraints.
+func (cs Constraints) Check(v *Version) bool {
+    for _, c := range cs {
+        if !c.Check(v) {
+            return false
+        }
+    }
+
+    return true
+}
+
+// Equals compares Constraints with other Constraints
+// for equality. This may not represent logical equivalence
+// of compared constraints.
+// e.g. even though '>0.1,>0.2' is logically equivalent
+// to '>0.2', it is *NOT* treated as equal.
+//
+// A missing operator is treated as equal to '=', whitespace
+// is ignored, and constraints are sorted before comparison.
+func (cs Constraints) Equals(c Constraints) bool {
+    if len(cs) != len(c) {
+        return false
+    }
+
+    // make copies to retain order of the original slices
+    left := make(Constraints, len(cs))
+    copy(left, cs)
+    sort.Stable(left)
+    right := make(Constraints, len(c))
+    copy(right, c)
+    sort.Stable(right)
+
+    // compare sorted slices
+    for i, con := range left {
+        if !con.Equals(right[i]) {
+            return false
+        }
+    }
+
+    return true
+}
+
+func (cs Constraints) Len() int {
+    return len(cs)
+}
+
+func (cs Constraints) Less(i, j int) bool {
+    if cs[i].op < cs[j].op {
+        return true
+    }
+    if cs[i].op > cs[j].op {
+        return false
+    }
+
+    return cs[i].check.LessThan(cs[j].check)
+}
+
+func (cs Constraints) Swap(i, j int) {
+    cs[i], cs[j] = cs[j], cs[i]
+}
+
+// String returns the string format of the constraints.
+func (cs Constraints) String() string {
+    csStr := make([]string, len(cs))
+    for i, c := range cs {
+        csStr[i] = c.String()
+    }
+
+    return strings.Join(csStr, ",")
+}
+
+// Check tests if a constraint is validated by the given version.
+func (c *Constraint) Check(v *Version) bool {
+    return c.f(v, c.check)
+}
+
+// Prerelease returns true if the version underlying this constraint
+// contains a prerelease field.
+func (c *Constraint) Prerelease() bool {
+    return len(c.check.Prerelease()) > 0
+}
+
+func (c *Constraint) String() string {
+    return c.original
+}
+
+func parseSingle(v string) (*Constraint, error) {
+    matches := constraintRegexp.FindStringSubmatch(v)
+    if matches == nil {
+        return nil, fmt.Errorf("Malformed constraint: %s", v)
+    }
+
+    check, err := NewVersion(matches[2])
+    if err != nil {
+        return nil, err
+    }
+
+    cop := constraintOperators[matches[1]]
+
+    return &Constraint{
+        f:        cop.f,
+        op:       cop.op,
+        check:    check,
+        original: v,
+    }, nil
+}
+
+func prereleaseCheck(v, c *Version) bool {
+    switch vPre, cPre := v.Prerelease() != "", c.Prerelease() != ""; {
+    case cPre && vPre:
+        // A constraint with a pre-release can only match a pre-release version
+        // with the same base segments.
+        return reflect.DeepEqual(c.Segments64(), v.Segments64())
+
+    case !cPre && vPre:
+        // A constraint without a pre-release can only match a version without a
+        // pre-release.
+ return false + + case cPre && !vPre: + // OK, except with the pessimistic operator + case !cPre && !vPre: + // OK + } + return true +} + +//------------------------------------------------------------------- +// Constraint functions +//------------------------------------------------------------------- + +type operator rune + +const ( + equal operator = '=' + notEqual operator = '≠' + greaterThan operator = '>' + lessThan operator = '<' + greaterThanEqual operator = '≥' + lessThanEqual operator = '≤' + pessimistic operator = '~' +) + +func constraintEqual(v, c *Version) bool { + return v.Equal(c) +} + +func constraintNotEqual(v, c *Version) bool { + return !v.Equal(c) +} + +func constraintGreaterThan(v, c *Version) bool { + return prereleaseCheck(v, c) && v.Compare(c) == 1 +} + +func constraintLessThan(v, c *Version) bool { + return prereleaseCheck(v, c) && v.Compare(c) == -1 +} + +func constraintGreaterThanEqual(v, c *Version) bool { + return prereleaseCheck(v, c) && v.Compare(c) >= 0 +} + +func constraintLessThanEqual(v, c *Version) bool { + return prereleaseCheck(v, c) && v.Compare(c) <= 0 +} + +func constraintPessimistic(v, c *Version) bool { + // Using a pessimistic constraint with a pre-release, restricts versions to pre-releases + if !prereleaseCheck(v, c) || (c.Prerelease() != "" && v.Prerelease() == "") { + return false + } + + // If the version being checked is naturally less than the constraint, then there + // is no way for the version to be valid against the constraint + if v.LessThan(c) { + return false + } + // We'll use this more than once, so grab the length now so it's a little cleaner + // to write the later checks + cs := len(c.segments) + + // If the version being checked has less specificity than the constraint, then there + // is no way for the version to be valid against the constraint + if cs > len(v.segments) { + return false + } + + // Check the segments in the constraint against those in the version. If the version + // being checked, at any point, does not have the same values in each index of the + // constraints segments, then it cannot be valid against the constraint. + for i := 0; i < c.si-1; i++ { + if v.segments[i] != c.segments[i] { + return false + } + } + + // Check the last part of the segment in the constraint. If the version segment at + // this index is less than the constraints segment at this index, then it cannot + // be valid against the constraint + if c.segments[cs-1] > v.segments[cs-1] { + return false + } + + // If nothing has rejected the version by now, it's valid + return true +} diff --git a/vendor/github.com/hashicorp/go-version/version.go b/vendor/github.com/hashicorp/go-version/version.go new file mode 100644 index 00000000..e87df699 --- /dev/null +++ b/vendor/github.com/hashicorp/go-version/version.go @@ -0,0 +1,407 @@ +package version + +import ( + "bytes" + "fmt" + "reflect" + "regexp" + "strconv" + "strings" +) + +// The compiled regular expression used to test the validity of a version. +var ( + versionRegexp *regexp.Regexp + semverRegexp *regexp.Regexp +) + +// The raw regular expression string used for testing the validity +// of a version. 
+const ( + VersionRegexpRaw string = `v?([0-9]+(\.[0-9]+)*?)` + + `(-([0-9]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)|(-?([A-Za-z\-~]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)))?` + + `(\+([0-9A-Za-z\-~]+(\.[0-9A-Za-z\-~]+)*))?` + + `?` + + // SemverRegexpRaw requires a separator between version and prerelease + SemverRegexpRaw string = `v?([0-9]+(\.[0-9]+)*?)` + + `(-([0-9]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)|(-([A-Za-z\-~]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)))?` + + `(\+([0-9A-Za-z\-~]+(\.[0-9A-Za-z\-~]+)*))?` + + `?` +) + +// Version represents a single version. +type Version struct { + metadata string + pre string + segments []int64 + si int + original string +} + +func init() { + versionRegexp = regexp.MustCompile("^" + VersionRegexpRaw + "$") + semverRegexp = regexp.MustCompile("^" + SemverRegexpRaw + "$") +} + +// NewVersion parses the given version and returns a new +// Version. +func NewVersion(v string) (*Version, error) { + return newVersion(v, versionRegexp) +} + +// NewSemver parses the given version and returns a new +// Version that adheres strictly to SemVer specs +// https://semver.org/ +func NewSemver(v string) (*Version, error) { + return newVersion(v, semverRegexp) +} + +func newVersion(v string, pattern *regexp.Regexp) (*Version, error) { + matches := pattern.FindStringSubmatch(v) + if matches == nil { + return nil, fmt.Errorf("Malformed version: %s", v) + } + segmentsStr := strings.Split(matches[1], ".") + segments := make([]int64, len(segmentsStr)) + for i, str := range segmentsStr { + val, err := strconv.ParseInt(str, 10, 64) + if err != nil { + return nil, fmt.Errorf( + "Error parsing version: %s", err) + } + + segments[i] = val + } + + // Even though we could support more than three segments, if we + // got less than three, pad it with 0s. This is to cover the basic + // default usecase of semver, which is MAJOR.MINOR.PATCH at the minimum + for i := len(segments); i < 3; i++ { + segments = append(segments, 0) + } + + pre := matches[7] + if pre == "" { + pre = matches[4] + } + + return &Version{ + metadata: matches[10], + pre: pre, + segments: segments, + si: len(segmentsStr), + original: v, + }, nil +} + +// Must is a helper that wraps a call to a function returning (*Version, error) +// and panics if error is non-nil. +func Must(v *Version, err error) *Version { + if err != nil { + panic(err) + } + + return v +} + +// Compare compares this version to another version. This +// returns -1, 0, or 1 if this version is smaller, equal, +// or larger than the other version, respectively. +// +// If you want boolean results, use the LessThan, Equal, +// GreaterThan, GreaterThanOrEqual or LessThanOrEqual methods. 
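+//
+// A short sketch (values chosen for illustration):
+//
+//	a, _ := NewVersion("1.2.0")
+//	b, _ := NewVersion("1.10.0")
+//	a.Compare(b) // -1, because 1.10.0 is the newer version
+//	b.Compare(a) // 1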
+func (v *Version) Compare(other *Version) int {
+    // A quick, efficient equality check
+    if v.String() == other.String() {
+        return 0
+    }
+
+    segmentsSelf := v.Segments64()
+    segmentsOther := other.Segments64()
+
+    // If the segments are the same, we must compare on prerelease info
+    if reflect.DeepEqual(segmentsSelf, segmentsOther) {
+        preSelf := v.Prerelease()
+        preOther := other.Prerelease()
+        if preSelf == "" && preOther == "" {
+            return 0
+        }
+        if preSelf == "" {
+            return 1
+        }
+        if preOther == "" {
+            return -1
+        }
+
+        return comparePrereleases(preSelf, preOther)
+    }
+
+    // Get the highest specificity (hS), or if they're equal, just use segmentSelf length
+    lenSelf := len(segmentsSelf)
+    lenOther := len(segmentsOther)
+    hS := lenSelf
+    if lenSelf < lenOther {
+        hS = lenOther
+    }
+    // Compare the segments
+    // Because a constraint could have more/less specificity than the version it's
+    // checking, we need to account for a lopsided or jagged comparison
+    for i := 0; i < hS; i++ {
+        if i > lenSelf-1 {
+            // This means Self had the lower specificity
+            // Check to see if the remaining segments in Other are all zeros
+            if !allZero(segmentsOther[i:]) {
+                // if not, it means that Other has to be greater than Self
+                return -1
+            }
+            break
+        } else if i > lenOther-1 {
+            // this means Other had the lower specificity
+            // Check to see if the remaining segments in Self are all zeros
+            if !allZero(segmentsSelf[i:]) {
+                // if not, it means that Self has to be greater than Other
+                return 1
+            }
+            break
+        }
+        lhs := segmentsSelf[i]
+        rhs := segmentsOther[i]
+        if lhs == rhs {
+            continue
+        } else if lhs < rhs {
+            return -1
+        }
+        // Otherwise, rhs was > lhs; they're not equal
+        return 1
+    }
+
+    // if we got this far, they're equal
+    return 0
+}
+
+func allZero(segs []int64) bool {
+    for _, s := range segs {
+        if s != 0 {
+            return false
+        }
+    }
+    return true
+}
+
+func comparePart(preSelf string, preOther string) int {
+    if preSelf == preOther {
+        return 0
+    }
+
+    var selfInt int64
+    selfNumeric := true
+    selfInt, err := strconv.ParseInt(preSelf, 10, 64)
+    if err != nil {
+        selfNumeric = false
+    }
+
+    var otherInt int64
+    otherNumeric := true
+    otherInt, err = strconv.ParseInt(preOther, 10, 64)
+    if err != nil {
+        otherNumeric = false
+    }
+
+    // if a part is empty, we use the other to decide
+    if preSelf == "" {
+        if otherNumeric {
+            return -1
+        }
+        return 1
+    }
+
+    if preOther == "" {
+        if selfNumeric {
+            return 1
+        }
+        return -1
+    }
+
+    if selfNumeric && !otherNumeric {
+        return -1
+    } else if !selfNumeric && otherNumeric {
+        return 1
+    } else if !selfNumeric && !otherNumeric && preSelf > preOther {
+        return 1
+    } else if selfInt > otherInt {
+        return 1
+    }
+
+    return -1
+}
+
+func comparePrereleases(v string, other string) int {
+    // the same prerelease!
+    if v == other {
+        return 0
+    }
+
+    // split both prereleases to analyze their parts
+    selfPreReleaseMeta := strings.Split(v, ".")
+    otherPreReleaseMeta := strings.Split(other, ".")
+
+    selfPreReleaseLen := len(selfPreReleaseMeta)
+    otherPreReleaseLen := len(otherPreReleaseMeta)
+
+    biggestLen := otherPreReleaseLen
+    if selfPreReleaseLen > otherPreReleaseLen {
+        biggestLen = selfPreReleaseLen
+    }
+
+    // loop over the parts to find the first difference
+    for i := 0; i < biggestLen; i = i + 1 {
+        partSelfPre := ""
+        if i < selfPreReleaseLen {
+            partSelfPre = selfPreReleaseMeta[i]
+        }
+
+        partOtherPre := ""
+        if i < otherPreReleaseLen {
+            partOtherPre = otherPreReleaseMeta[i]
+        }
+
+        compare := comparePart(partSelfPre, partOtherPre)
+        // if the parts are equal, continue the loop
+        if compare != 0 {
+            return compare
+        }
+    }
+
+    return 0
+}
+
+// Core returns a new version constructed from only the MAJOR.MINOR.PATCH
+// segments of the version, without prerelease or metadata.
+func (v *Version) Core() *Version {
+    segments := v.Segments64()
+    segmentsOnly := fmt.Sprintf("%d.%d.%d", segments[0], segments[1], segments[2])
+    return Must(NewVersion(segmentsOnly))
+}
+
+// Equal tests if two versions are equal.
+func (v *Version) Equal(o *Version) bool {
+    if v == nil || o == nil {
+        return v == o
+    }
+
+    return v.Compare(o) == 0
+}
+
+// GreaterThan tests if this version is greater than another version.
+func (v *Version) GreaterThan(o *Version) bool {
+    return v.Compare(o) > 0
+}
+
+// GreaterThanOrEqual tests if this version is greater than or equal to another version.
+func (v *Version) GreaterThanOrEqual(o *Version) bool {
+    return v.Compare(o) >= 0
+}
+
+// LessThan tests if this version is less than another version.
+func (v *Version) LessThan(o *Version) bool {
+    return v.Compare(o) < 0
+}
+
+// LessThanOrEqual tests if this version is less than or equal to another version.
+func (v *Version) LessThanOrEqual(o *Version) bool {
+    return v.Compare(o) <= 0
+}
+
+// Metadata returns any metadata that was part of the version
+// string.
+//
+// Metadata is anything that comes after the "+" in the version.
+// For example, with "1.2.3+beta", the metadata is "beta".
+func (v *Version) Metadata() string {
+    return v.metadata
+}
+
+// Prerelease returns any prerelease data that is part of the version,
+// or blank if there is no prerelease data.
+//
+// Prerelease information is anything that comes after the "-" in the
+// version (but before any metadata). For example, with "1.2.3-beta",
+// the prerelease information is "beta".
+func (v *Version) Prerelease() string {
+    return v.pre
+}
+
+// Segments returns the numeric segments of the version as a slice of ints.
+//
+// This excludes any metadata or pre-release information. For example,
+// for a version "1.2.3-beta", segments will return a slice of
+// 1, 2, 3.
+func (v *Version) Segments() []int {
+    segmentSlice := make([]int, len(v.segments))
+    for i, v := range v.segments {
+        segmentSlice[i] = int(v)
+    }
+    return segmentSlice
+}
+
+// Segments64 returns the numeric segments of the version as a slice of int64s.
+//
+// This excludes any metadata or pre-release information. For example,
+// for a version "1.2.3-beta", segments will return a slice of
+// 1, 2, 3.
+func (v *Version) Segments64() []int64 {
+    result := make([]int64, len(v.segments))
+    copy(result, v.segments)
+    return result
+}
+
+// String returns the full version string, including pre-release
+// and metadata information.
+// +// This value is rebuilt according to the parsed segments and other +// information. Therefore, ambiguities in the version string such as +// prefixed zeroes (1.04.0 => 1.4.0), `v` prefix (v1.0.0 => 1.0.0), and +// missing parts (1.0 => 1.0.0) will be made into a canonicalized form +// as shown in the parenthesized examples. +func (v *Version) String() string { + var buf bytes.Buffer + fmtParts := make([]string, len(v.segments)) + for i, s := range v.segments { + // We can ignore err here since we've pre-parsed the values in segments + str := strconv.FormatInt(s, 10) + fmtParts[i] = str + } + fmt.Fprintf(&buf, strings.Join(fmtParts, ".")) + if v.pre != "" { + fmt.Fprintf(&buf, "-%s", v.pre) + } + if v.metadata != "" { + fmt.Fprintf(&buf, "+%s", v.metadata) + } + + return buf.String() +} + +// Original returns the original parsed version as-is, including any +// potential whitespace, `v` prefix, etc. +func (v *Version) Original() string { + return v.original +} + +// UnmarshalText implements encoding.TextUnmarshaler interface. +func (v *Version) UnmarshalText(b []byte) error { + temp, err := NewVersion(string(b)) + if err != nil { + return err + } + + *v = *temp + + return nil +} + +// MarshalText implements encoding.TextMarshaler interface. +func (v *Version) MarshalText() ([]byte, error) { + return []byte(v.String()), nil +} diff --git a/vendor/github.com/hashicorp/go-version/version_collection.go b/vendor/github.com/hashicorp/go-version/version_collection.go new file mode 100644 index 00000000..cc888d43 --- /dev/null +++ b/vendor/github.com/hashicorp/go-version/version_collection.go @@ -0,0 +1,17 @@ +package version + +// Collection is a type that implements the sort.Interface interface +// so that versions can be sorted. +type Collection []*Version + +func (v Collection) Len() int { + return len(v) +} + +func (v Collection) Less(i, j int) bool { + return v[i].LessThan(v[j]) +} + +func (v Collection) Swap(i, j int) { + v[i], v[j] = v[j], v[i] +} diff --git a/vendor/github.com/imdario/mergo/.deepsource.toml b/vendor/github.com/imdario/mergo/.deepsource.toml new file mode 100644 index 00000000..8a0681af --- /dev/null +++ b/vendor/github.com/imdario/mergo/.deepsource.toml @@ -0,0 +1,12 @@ +version = 1 + +test_patterns = [ + "*_test.go" +] + +[[analyzers]] +name = "go" +enabled = true + + [analyzers.meta] + import_path = "github.com/imdario/mergo" \ No newline at end of file diff --git a/vendor/github.com/imdario/mergo/.travis.yml b/vendor/github.com/imdario/mergo/.travis.yml index b13a50ed..d324c43b 100644 --- a/vendor/github.com/imdario/mergo/.travis.yml +++ b/vendor/github.com/imdario/mergo/.travis.yml @@ -1,7 +1,12 @@ language: go +arch: + - amd64 + - ppc64le install: - go get -t - go get golang.org/x/tools/cmd/cover - go get github.com/mattn/goveralls script: + - go test -race -v ./... +after_script: - $HOME/gopath/bin/goveralls -service=travis-ci -repotoken $COVERALLS_TOKEN diff --git a/vendor/github.com/imdario/mergo/README.md b/vendor/github.com/imdario/mergo/README.md index d1cefa87..7e6f7aee 100644 --- a/vendor/github.com/imdario/mergo/README.md +++ b/vendor/github.com/imdario/mergo/README.md @@ -1,51 +1,61 @@ # Mergo -A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements. - -Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the Province of Ancona in the Italian region of Marche. - -## Status - -It is ready for production use. 
[It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc](https://github.com/imdario/mergo#mergo-in-the-wild). [![GoDoc][3]][4] -[![GoCard][5]][6] +[![GitHub release][5]][6] +[![GoCard][7]][8] [![Build Status][1]][2] -[![Coverage Status][7]][8] -[![Sourcegraph][9]][10] +[![Coverage Status][9]][10] +[![Sourcegraph][11]][12] +[![FOSSA Status][13]][14] +[![Become my sponsor][15]][16] [1]: https://travis-ci.org/imdario/mergo.png [2]: https://travis-ci.org/imdario/mergo [3]: https://godoc.org/github.com/imdario/mergo?status.svg [4]: https://godoc.org/github.com/imdario/mergo -[5]: https://goreportcard.com/badge/imdario/mergo -[6]: https://goreportcard.com/report/github.com/imdario/mergo -[7]: https://coveralls.io/repos/github/imdario/mergo/badge.svg?branch=master -[8]: https://coveralls.io/github/imdario/mergo?branch=master -[9]: https://sourcegraph.com/github.com/imdario/mergo/-/badge.svg -[10]: https://sourcegraph.com/github.com/imdario/mergo?badge +[5]: https://img.shields.io/github/release/imdario/mergo.svg +[6]: https://github.com/imdario/mergo/releases +[7]: https://goreportcard.com/badge/imdario/mergo +[8]: https://goreportcard.com/report/github.com/imdario/mergo +[9]: https://coveralls.io/repos/github/imdario/mergo/badge.svg?branch=master +[10]: https://coveralls.io/github/imdario/mergo?branch=master +[11]: https://sourcegraph.com/github.com/imdario/mergo/-/badge.svg +[12]: https://sourcegraph.com/github.com/imdario/mergo?badge +[13]: https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=shield +[14]: https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_shield +[15]: https://img.shields.io/github/sponsors/imdario +[16]: https://github.com/sponsors/imdario + +A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements. -### Latest release +Mergo merges same-type structs and maps by setting default values in zero-value fields. Mergo won't merge unexported (private) fields. It will do recursively any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection). -[Release v0.3.4](https://github.com/imdario/mergo/releases/tag/v0.3.4). +Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the Province of Ancona in the Italian region of Marche. + +## Status + +It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, Microsoft, etc](https://github.com/imdario/mergo#mergo-in-the-wild). ### Important note -Please keep in mind that in [0.3.2](//github.com/imdario/mergo/releases/tag/0.3.2) Mergo changed `Merge()`and `Map()` signatures to support [transformers](#transformers). An optional/variadic argument has been added, so it won't break existing code. +Please keep in mind that a problematic PR broke [0.3.9](//github.com/imdario/mergo/releases/tag/0.3.9). I reverted it in [0.3.10](//github.com/imdario/mergo/releases/tag/0.3.10), and I consider it stable but not bug-free. Also, this version adds support for go modules. + +Keep in mind that in [0.3.2](//github.com/imdario/mergo/releases/tag/0.3.2), Mergo changed `Merge()`and `Map()` signatures to support [transformers](#transformers). I added an optional/variadic argument so that it won't break the existing code. 
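+
+As a minimal sketch of that variadic form (`WithOverride` is one of the option functions defined by this package):
+
+```go
+if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil {
+    // handle the error
+}
+```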
-If you were using Mergo **before** April 6th 2015, please check your project works as intended after updating your local copy with ```go get -u github.com/imdario/mergo```. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause (I hope it won't!) in existing projects after the change (release 0.2.0). +If you were using Mergo before April 6th, 2015, please check your project works as intended after updating your local copy with ```go get -u github.com/imdario/mergo```. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0). ### Donations -If Mergo is useful to you, consider buying me a coffee, a beer or making a monthly donation so I can keep building great free software. :heart_eyes: +If Mergo is useful to you, consider buying me a coffee, a beer, or making a monthly donation to allow me to keep building great free software. :heart_eyes: Buy Me a Coffee at ko-fi.com -[![Beerpay](https://beerpay.io/imdario/mergo/badge.svg)](https://beerpay.io/imdario/mergo) -[![Beerpay](https://beerpay.io/imdario/mergo/make-wish.svg)](https://beerpay.io/imdario/mergo) Donate using Liberapay +Become my sponsor ### Mergo in the wild +- [cli/cli](https://github.com/cli/cli) - [moby/moby](https://github.com/moby/moby) - [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes) - [vmware/dispatch](https://github.com/vmware/dispatch) @@ -86,8 +96,11 @@ If Mergo is useful to you, consider buying me a coffee, a beer or making a month - [mantasmatelis/whooplist-server](https://github.com/mantasmatelis/whooplist-server) - [jnuthong/item_search](https://github.com/jnuthong/item_search) - [bukalapak/snowboard](https://github.com/bukalapak/snowboard) +- [containerssh/containerssh](https://github.com/containerssh/containerssh) +- [goreleaser/goreleaser](https://github.com/goreleaser/goreleaser) +- [tjpnz/structbot](https://github.com/tjpnz/structbot) -## Installation +## Install go get github.com/imdario/mergo @@ -98,7 +111,7 @@ If Mergo is useful to you, consider buying me a coffee, a beer or making a month ## Usage -You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. Mergo won't merge unexported (private) fields but will do recursively any exported one. It won't merge empty structs value as [they are not considered zero values](https://golang.org/ref/spec#The_zero_value) either. Also maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection). +You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. Mergo won't merge unexported (private) fields but will do recursively any exported one. It won't merge empty structs value as [they are zero values](https://golang.org/ref/spec#The_zero_value) too. Also, maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection). ```go if err := mergo.Merge(&dst, src); err != nil { @@ -124,9 +137,7 @@ if err := mergo.Map(&dst, srcMap); err != nil { Warning: if you map a struct to map, it won't do it recursively. Don't expect Mergo to map struct members of your struct as `map[string]interface{}`. They will be just assigned as values. -More information and examples in [godoc documentation](http://godoc.org/github.com/imdario/mergo). 
-
-### Nice example
+Here is a nice example:
 
 ```go
 package main
 
 import (
     "fmt"
     "github.com/imdario/mergo"
 )
@@ -158,7 +169,7 @@ func main() {
 
 Note: if test are failing due missing package, please execute:
 
-    go get gopkg.in/yaml.v2
+    go get gopkg.in/yaml.v3
 
 ### Transformers
 
@@ -174,10 +185,10 @@ import (
     "time"
 )
 
-type timeTransfomer struct {
+type timeTransformer struct {
 }
 
-func (t timeTransfomer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error {
+func (t timeTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error {
     if typ == reflect.TypeOf(time.Time{}) {
         return func(dst, src reflect.Value) error {
             if dst.CanSet() {
@@ -201,14 +212,13 @@ type Snapshot struct {
 func main() {
     src := Snapshot{time.Now()}
     dest := Snapshot{}
-    mergo.Merge(&dest, src, mergo.WithTransformers(timeTransfomer{}))
+    mergo.Merge(&dest, src, mergo.WithTransformers(timeTransformer{}))
     fmt.Println(dest)
     // Will print
     // { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 }
 }
 ```
-
 ## Contact me
 
 If I can help you, you have an idea or you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): [@im_dario](https://twitter.com/im_dario)
@@ -220,3 +230,6 @@ Written by [Dario Castañé](http://dario.im).
 ## License
 
 [BSD 3-Clause](http://opensource.org/licenses/BSD-3-Clause) license, as [Go language](http://golang.org/LICENSE).
+
+
+[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_large)
diff --git a/vendor/github.com/imdario/mergo/doc.go b/vendor/github.com/imdario/mergo/doc.go
index 6e9aa7ba..fcd985f9 100644
--- a/vendor/github.com/imdario/mergo/doc.go
+++ b/vendor/github.com/imdario/mergo/doc.go
@@ -4,41 +4,140 @@
 // license that can be found in the LICENSE file.
 
 /*
-Package mergo merges same-type structs and maps by setting default values in zero-value fields.
+A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements.
 
-Mergo won't merge unexported (private) fields but will do recursively any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection).
+Mergo merges same-type structs and maps by setting default values in zero-value fields. Mergo won't merge unexported (private) fields. It will do recursively any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection).
+
+Status
+
+It is ready for production use. It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc.
+
+Important note
+
+Please keep in mind that a problematic PR broke 0.3.9. We reverted it in 0.3.10. We consider 0.3.10 stable but not bug-free. Also, this version adds support for go modules.
+
+Keep in mind that in 0.3.2, Mergo changed Merge() and Map() signatures to support transformers. We added an optional/variadic argument so that it won't break the existing code.
+
+If you were using Mergo before April 6th, 2015, please check your project works as intended after updating your local copy with go get -u github.com/imdario/mergo. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0).
+ +Install + +Do your usual installation procedure: + + go get github.com/imdario/mergo + + // use in your .go code + import ( + "github.com/imdario/mergo" + ) Usage -From my own work-in-progress project: +You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. Mergo won't merge unexported (private) fields but will do recursively any exported one. It won't merge empty structs value as they are zero values too. Also, maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection). + + if err := mergo.Merge(&dst, src); err != nil { + // ... + } + +Also, you can merge overwriting values using the transformer WithOverride. + + if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil { + // ... + } + +Additionally, you can map a map[string]interface{} to a struct (and otherwise, from struct to map), following the same restrictions as in Merge(). Keys are capitalized to find each corresponding exported field. + + if err := mergo.Map(&dst, srcMap); err != nil { + // ... + } + +Warning: if you map a struct to map, it won't do it recursively. Don't expect Mergo to map struct members of your struct as map[string]interface{}. They will be just assigned as values. + +Here is a nice example: + + package main + + import ( + "fmt" + "github.com/imdario/mergo" + ) - type networkConfig struct { - Protocol string - Address string - ServerType string `json: "server_type"` - Port uint16 + type Foo struct { + A string + B int64 } - type FssnConfig struct { - Network networkConfig + func main() { + src := Foo{ + A: "one", + B: 2, + } + dest := Foo{ + A: "two", + } + mergo.Merge(&dest, src) + fmt.Println(dest) + // Will print + // {two 2} } - var fssnDefault = FssnConfig { - networkConfig { - "tcp", - "127.0.0.1", - "http", - 31560, - }, +Transformers + +Transformers allow to merge specific types differently than in the default behavior. In other words, now you can customize how some types are merged. For example, time.Time is a struct; it doesn't have zero value but IsZero can return true because it has fields with zero value. How can we merge a non-zero time.Time? + + package main + + import ( + "fmt" + "github.com/imdario/mergo" + "reflect" + "time" + ) + + type timeTransformer struct { } - // Inside a function [...] + func (t timeTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error { + if typ == reflect.TypeOf(time.Time{}) { + return func(dst, src reflect.Value) error { + if dst.CanSet() { + isZero := dst.MethodByName("IsZero") + result := isZero.Call([]reflect.Value{}) + if result[0].Bool() { + dst.Set(src) + } + } + return nil + } + } + return nil + } + + type Snapshot struct { + Time time.Time + // ... + } - if err := mergo.Merge(&config, fssnDefault); err != nil { - log.Fatal(err) + func main() { + src := Snapshot{time.Now()} + dest := Snapshot{} + mergo.Merge(&dest, src, mergo.WithTransformers(timeTransformer{})) + fmt.Println(dest) + // Will print + // { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 } } - // More code [...] +Contact me + +If I can help you, you have an idea or you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): https://twitter.com/im_dario + +About + +Written by Dario Castañé: https://da.rio.hn + +License + +BSD 3-Clause license, as Go language. 
*/ package mergo diff --git a/vendor/github.com/imdario/mergo/map.go b/vendor/github.com/imdario/mergo/map.go index 6ea38e63..a13a7ee4 100644 --- a/vendor/github.com/imdario/mergo/map.go +++ b/vendor/github.com/imdario/mergo/map.go @@ -72,6 +72,7 @@ func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, conf case reflect.Struct: srcMap := src.Interface().(map[string]interface{}) for key := range srcMap { + config.overwriteWithEmptyValue = true srcValue := srcMap[key] fieldName := changeInitialCase(key, unicode.ToUpper) dstElement := dst.FieldByName(fieldName) @@ -140,6 +141,9 @@ func MapWithOverwrite(dst, src interface{}, opts ...func(*Config)) error { } func _map(dst, src interface{}, opts ...func(*Config)) error { + if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr { + return ErrNonPointerAgument + } var ( vDst, vSrc reflect.Value err error diff --git a/vendor/github.com/imdario/mergo/merge.go b/vendor/github.com/imdario/mergo/merge.go index 706b2206..8b4e2f47 100644 --- a/vendor/github.com/imdario/mergo/merge.go +++ b/vendor/github.com/imdario/mergo/merge.go @@ -9,25 +9,43 @@ package mergo import ( + "fmt" "reflect" ) -func hasExportedField(dst reflect.Value) (exported bool) { +func hasMergeableFields(dst reflect.Value) (exported bool) { for i, n := 0, dst.NumField(); i < n; i++ { field := dst.Type().Field(i) if field.Anonymous && dst.Field(i).Kind() == reflect.Struct { - exported = exported || hasExportedField(dst.Field(i)) - } else { + exported = exported || hasMergeableFields(dst.Field(i)) + } else if isExportedComponent(&field) { exported = exported || len(field.PkgPath) == 0 } } return } +func isExportedComponent(field *reflect.StructField) bool { + pkgPath := field.PkgPath + if len(pkgPath) > 0 { + return false + } + c := field.Name[0] + if 'a' <= c && c <= 'z' || c == '_' { + return false + } + return true +} + type Config struct { - Overwrite bool - AppendSlice bool - Transformers Transformers + Overwrite bool + AppendSlice bool + TypeCheck bool + Transformers Transformers + overwriteWithEmptyValue bool + overwriteSliceWithEmptyValue bool + sliceDeepCopy bool + debug bool } type Transformers interface { @@ -39,6 +57,10 @@ type Transformers interface { // short circuiting on recursive types. 
func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) { overwrite := config.Overwrite + typeCheck := config.TypeCheck + overwriteWithEmptySrc := config.overwriteWithEmptyValue + overwriteSliceWithEmptySrc := config.overwriteSliceWithEmptyValue + sliceDeepCopy := config.sliceDeepCopy if !src.IsValid() { return @@ -57,7 +79,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co visited[h] = &visit{addr, typ, seen} } - if config.Transformers != nil && !isEmptyValue(dst) { + if config.Transformers != nil && !isReflectNil(dst) && dst.IsValid() { if fn := config.Transformers.Transformer(dst.Type()); fn != nil { err = fn(dst, src) return @@ -66,21 +88,34 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co switch dst.Kind() { case reflect.Struct: - if hasExportedField(dst) { + if hasMergeableFields(dst) { for i, n := 0, dst.NumField(); i < n; i++ { if err = deepMerge(dst.Field(i), src.Field(i), visited, depth+1, config); err != nil { return } } } else { - if dst.CanSet() && !isEmptyValue(src) && (overwrite || isEmptyValue(dst)) { + if dst.CanSet() && (isReflectNil(dst) || overwrite) && (!isEmptyValue(src) || overwriteWithEmptySrc) { dst.Set(src) } } case reflect.Map: if dst.IsNil() && !src.IsNil() { - dst.Set(reflect.MakeMap(dst.Type())) + if dst.CanSet() { + dst.Set(reflect.MakeMap(dst.Type())) + } else { + dst = src + return + } } + + if src.Kind() != reflect.Map { + if overwrite { + dst.Set(src) + } + return + } + for _, key := range src.MapKeys() { srcElement := src.MapIndex(key) if !srcElement.IsValid() { @@ -90,6 +125,9 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co switch srcElement.Kind() { case reflect.Chan, reflect.Func, reflect.Map, reflect.Interface, reflect.Slice: if srcElement.IsNil() { + if overwrite { + dst.SetMapIndex(key, srcElement) + } continue } fallthrough @@ -124,19 +162,43 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co dstSlice = reflect.ValueOf(dstElement.Interface()) } - if !isEmptyValue(src) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice { + if (!isEmptyValue(src) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice && !sliceDeepCopy { + if typeCheck && srcSlice.Type() != dstSlice.Type() { + return fmt.Errorf("cannot override two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type()) + } dstSlice = srcSlice } else if config.AppendSlice { + if srcSlice.Type() != dstSlice.Type() { + return fmt.Errorf("cannot append two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type()) + } dstSlice = reflect.AppendSlice(dstSlice, srcSlice) + } else if sliceDeepCopy { + i := 0 + for ; i < srcSlice.Len() && i < dstSlice.Len(); i++ { + srcElement := srcSlice.Index(i) + dstElement := dstSlice.Index(i) + + if srcElement.CanInterface() { + srcElement = reflect.ValueOf(srcElement.Interface()) + } + if dstElement.CanInterface() { + dstElement = reflect.ValueOf(dstElement.Interface()) + } + + if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { + return + } + } + } dst.SetMapIndex(key, dstSlice) } } - if dstElement.IsValid() && reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Map { + if dstElement.IsValid() && !isEmptyValue(dstElement) && (reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Map || reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Slice) 
{ continue } - if srcElement.IsValid() && (overwrite || (!dstElement.IsValid() || isEmptyValue(dstElement))) { + if srcElement.IsValid() && ((srcElement.Kind() != reflect.Ptr && overwrite) || !dstElement.IsValid() || isEmptyValue(dstElement)) { if dst.IsNil() { dst.Set(reflect.MakeMap(dst.Type())) } @@ -147,19 +209,41 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co if !dst.CanSet() { break } - if !isEmptyValue(src) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice { + if (!isEmptyValue(src) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice && !sliceDeepCopy { dst.Set(src) } else if config.AppendSlice { + if src.Type() != dst.Type() { + return fmt.Errorf("cannot append two slice with different type (%s, %s)", src.Type(), dst.Type()) + } dst.Set(reflect.AppendSlice(dst, src)) + } else if sliceDeepCopy { + for i := 0; i < src.Len() && i < dst.Len(); i++ { + srcElement := src.Index(i) + dstElement := dst.Index(i) + if srcElement.CanInterface() { + srcElement = reflect.ValueOf(srcElement.Interface()) + } + if dstElement.CanInterface() { + dstElement = reflect.ValueOf(dstElement.Interface()) + } + + if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { + return + } + } } case reflect.Ptr: fallthrough case reflect.Interface: - if src.IsNil() { + if isReflectNil(src) { + if overwriteWithEmptySrc && dst.CanSet() && src.Type().AssignableTo(dst.Type()) { + dst.Set(src) + } break } + if src.Kind() != reflect.Interface { - if dst.IsNil() || overwrite { + if dst.IsNil() || (src.Kind() != reflect.Ptr && overwrite) { if dst.CanSet() && (overwrite || isEmptyValue(dst)) { dst.Set(src) } @@ -176,18 +260,31 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co } break } + if dst.IsNil() || overwrite { if dst.CanSet() && (overwrite || isEmptyValue(dst)) { dst.Set(src) } - } else if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil { - return + break + } + + if dst.Elem().Kind() == src.Elem().Kind() { + if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil { + return + } + break } default: - if dst.CanSet() && !isEmptyValue(src) && (overwrite || isEmptyValue(dst)) { - dst.Set(src) + mustSet := (isEmptyValue(dst) || overwrite) && (!isEmptyValue(src) || overwriteWithEmptySrc) + if mustSet { + if dst.CanSet() { + dst.Set(src) + } else { + dst = src + } } } + return } @@ -199,7 +296,7 @@ func Merge(dst, src interface{}, opts ...func(*Config)) error { return merge(dst, src, opts...) } -// MergeWithOverwrite will do the same as Merge except that non-empty dst attributes will be overriden by +// MergeWithOverwrite will do the same as Merge except that non-empty dst attributes will be overridden by // non-empty src attribute values. // Deprecated: use Merge(…) with WithOverride func MergeWithOverwrite(dst, src interface{}, opts ...func(*Config)) error { @@ -218,12 +315,37 @@ func WithOverride(config *Config) { config.Overwrite = true } -// WithAppendSlice will make merge append slices instead of overwriting it +// WithOverwriteWithEmptyValue will make merge override non empty dst attributes with empty src attributes values. +func WithOverwriteWithEmptyValue(config *Config) { + config.Overwrite = true + config.overwriteWithEmptyValue = true +} + +// WithOverrideEmptySlice will make merge override empty dst slice with empty src slice. 
+func WithOverrideEmptySlice(config *Config) { + config.overwriteSliceWithEmptyValue = true +} + +// WithAppendSlice will make merge append slices instead of overwriting it. func WithAppendSlice(config *Config) { config.AppendSlice = true } +// WithTypeCheck will make merge check types while overwriting it (must be used with WithOverride). +func WithTypeCheck(config *Config) { + config.TypeCheck = true +} + +// WithSliceDeepCopy will merge slice element one by one with Overwrite flag. +func WithSliceDeepCopy(config *Config) { + config.sliceDeepCopy = true + config.Overwrite = true +} + func merge(dst, src interface{}, opts ...func(*Config)) error { + if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr { + return ErrNonPointerAgument + } var ( vDst, vSrc reflect.Value err error @@ -243,3 +365,16 @@ func merge(dst, src interface{}, opts ...func(*Config)) error { } return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config) } + +// IsReflectNil is the reflect value provided nil +func isReflectNil(v reflect.Value) bool { + k := v.Kind() + switch k { + case reflect.Interface, reflect.Slice, reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr: + // Both interface and slice are nil if first word is 0. + // Both are always bigger than a word; assume flagIndir. + return v.IsNil() + default: + return false + } +} diff --git a/vendor/github.com/imdario/mergo/mergo.go b/vendor/github.com/imdario/mergo/mergo.go index a82fea2f..9fe362d4 100644 --- a/vendor/github.com/imdario/mergo/mergo.go +++ b/vendor/github.com/imdario/mergo/mergo.go @@ -17,9 +17,10 @@ import ( var ( ErrNilArguments = errors.New("src and dst must not be nil") ErrDifferentArgumentsTypes = errors.New("src and dst must be of same type") - ErrNotSupported = errors.New("only structs and maps are supported") + ErrNotSupported = errors.New("only structs, maps, and slices are supported") ErrExpectedMapAsDestination = errors.New("dst was expected to be a map") ErrExpectedStructAsDestination = errors.New("dst was expected to be a struct") + ErrNonPointerAgument = errors.New("dst must be a pointer") ) // During deepMerge, must keep track of checks that are @@ -64,7 +65,7 @@ func resolveValues(dst, src interface{}) (vDst, vSrc reflect.Value, err error) { return } vDst = reflect.ValueOf(dst).Elem() - if vDst.Kind() != reflect.Struct && vDst.Kind() != reflect.Map { + if vDst.Kind() != reflect.Struct && vDst.Kind() != reflect.Map && vDst.Kind() != reflect.Slice { err = ErrNotSupported return } @@ -75,23 +76,3 @@ func resolveValues(dst, src interface{}) (vDst, vSrc reflect.Value, err error) { } return } - -// Traverses recursively both values, assigning src's fields values to dst. -// The map argument tracks comparisons that have already been seen, which allows -// short circuiting on recursive types. -func deeper(dst, src reflect.Value, visited map[uintptr]*visit, depth int) (err error) { - if dst.CanAddr() { - addr := dst.UnsafeAddr() - h := 17 * addr - seen := visited[h] - typ := dst.Type() - for p := seen; p != nil; p = p.next { - if p.ptr == addr && p.typ == typ { - return nil - } - } - // Remember, remember... 
- visited[h] = &visit{addr, typ, seen} - } - return // TODO refactor -} diff --git a/vendor/github.com/jmoiron/sqlx/.gitignore b/vendor/github.com/jmoiron/sqlx/.gitignore new file mode 100644 index 00000000..b2be23c8 --- /dev/null +++ b/vendor/github.com/jmoiron/sqlx/.gitignore @@ -0,0 +1,25 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test +.idea + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +tags +environ diff --git a/vendor/github.com/jmoiron/sqlx/.travis.yml b/vendor/github.com/jmoiron/sqlx/.travis.yml new file mode 100644 index 00000000..1cfa28cb --- /dev/null +++ b/vendor/github.com/jmoiron/sqlx/.travis.yml @@ -0,0 +1,26 @@ +# vim: ft=yaml sw=2 ts=2 + +language: go + +# enable database services +services: + - mysql + - postgresql + +# create test database +before_install: + - mysql -e 'CREATE DATABASE IF NOT EXISTS sqlxtest;' + - psql -c 'create database sqlxtest;' -U postgres + - go get github.com/mattn/goveralls + - export SQLX_MYSQL_DSN="travis:@/sqlxtest?parseTime=true" + - export SQLX_POSTGRES_DSN="postgres://postgres:@localhost/sqlxtest?sslmode=disable" + - export SQLX_SQLITE_DSN="$HOME/sqlxtest.db" + +# go versions to test +go: + - "1.15.x" + - "1.16.x" + +# run tests w/ coverage +script: + - travis_retry $GOPATH/bin/goveralls -service=travis-ci diff --git a/vendor/github.com/jmoiron/sqlx/LICENSE b/vendor/github.com/jmoiron/sqlx/LICENSE new file mode 100644 index 00000000..0d31edfa --- /dev/null +++ b/vendor/github.com/jmoiron/sqlx/LICENSE @@ -0,0 +1,23 @@ + Copyright (c) 2013, Jason Moiron + + Permission is hereby granted, free of charge, to any person + obtaining a copy of this software and associated documentation + files (the "Software"), to deal in the Software without + restriction, including without limitation the rights to use, + copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the + Software is furnished to do so, subject to the following + conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. 
+ diff --git a/vendor/github.com/jmoiron/sqlx/README.md b/vendor/github.com/jmoiron/sqlx/README.md new file mode 100644 index 00000000..0d715929 --- /dev/null +++ b/vendor/github.com/jmoiron/sqlx/README.md @@ -0,0 +1,213 @@ +# sqlx + +[![Build Status](https://travis-ci.org/jmoiron/sqlx.svg?branch=master)](https://travis-ci.org/jmoiron/sqlx) [![Coverage Status](https://coveralls.io/repos/github/jmoiron/sqlx/badge.svg?branch=master)](https://coveralls.io/github/jmoiron/sqlx?branch=master) [![Godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/jmoiron/sqlx) [![license](http://img.shields.io/badge/license-MIT-red.svg?style=flat)](https://raw.githubusercontent.com/jmoiron/sqlx/master/LICENSE) + +sqlx is a library which provides a set of extensions on go's standard +`database/sql` library. The sqlx versions of `sql.DB`, `sql.TX`, `sql.Stmt`, +et al. all leave the underlying interfaces untouched, so that their interfaces +are a superset on the standard ones. This makes it relatively painless to +integrate existing codebases using database/sql with sqlx. + +Major additional concepts are: + +* Marshal rows into structs (with embedded struct support), maps, and slices +* Named parameter support including prepared statements +* `Get` and `Select` to go quickly from query to struct/slice + +In addition to the [godoc API documentation](http://godoc.org/github.com/jmoiron/sqlx), +there is also some [user documentation](http://jmoiron.github.io/sqlx/) that +explains how to use `database/sql` along with sqlx. + +## Recent Changes + +1.3.0: + +* `sqlx.DB.Connx(context.Context) *sqlx.Conn` +* `sqlx.BindDriver(driverName, bindType)` +* support for `[]map[string]interface{}` to do "batch" insertions +* allocation & perf improvements for `sqlx.In` + +DB.Connx returns an `sqlx.Conn`, which is an `sql.Conn`-alike consistent with +sqlx's wrapping of other types. + +`BindDriver` allows users to control the bindvars that sqlx will use for drivers, +and add new drivers at runtime. This results in a very slight performance hit +when resolving the driver into a bind type (~40ns per call), but it allows users +to specify what bindtype their driver uses even when sqlx has not been updated +to know about it by default. + +### Backwards Compatibility + +Compatibility with the most recent two versions of Go is a requirement for any +new changes. Compatibility beyond that is not guaranteed. + +Versioning is done with Go modules. Breaking changes (eg. removing deprecated API) +will get major version number bumps. + +## install + + go get github.com/jmoiron/sqlx + +## issues + +Row headers can be ambiguous (`SELECT 1 AS a, 2 AS a`), and the result of +`Columns()` does not fully qualify column names in queries like: + +```sql +SELECT a.id, a.name, b.id, b.name FROM foos AS a JOIN foos AS b ON a.parent = b.id; +``` + +making a struct or map destination ambiguous. Use `AS` in your queries +to give columns distinct names, `rows.Scan` to scan them manually, or +`SliceScan` to get a slice of results. + +## usage + +Below is an example which shows some common use cases for sqlx. Check +[sqlx_test.go](https://github.com/jmoiron/sqlx/blob/master/sqlx_test.go) for more +usage. 
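+
+As a small illustration of the `BindDriver` hook described above (the driver name here is hypothetical):
+
+```go
+// Register the bindvar style for a driver sqlx does not know about by default.
+sqlx.BindDriver("mydriver", sqlx.DOLLAR)
+
+// BindType then resolves the registered style at query-rebinding time.
+bindType := sqlx.BindType("mydriver") // == sqlx.DOLLAR
+```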
+ + +```go +package main + +import ( + "database/sql" + "fmt" + "log" + + _ "github.com/lib/pq" + "github.com/jmoiron/sqlx" +) + +var schema = ` +CREATE TABLE person ( + first_name text, + last_name text, + email text +); + +CREATE TABLE place ( + country text, + city text NULL, + telcode integer +)` + +type Person struct { + FirstName string `db:"first_name"` + LastName string `db:"last_name"` + Email string +} + +type Place struct { + Country string + City sql.NullString + TelCode int +} + +func main() { + // this Pings the database trying to connect + // use sqlx.Open() for sql.Open() semantics + db, err := sqlx.Connect("postgres", "user=foo dbname=bar sslmode=disable") + if err != nil { + log.Fatalln(err) + } + + // exec the schema or fail; multi-statement Exec behavior varies between + // database drivers; pq will exec them all, sqlite3 won't, ymmv + db.MustExec(schema) + + tx := db.MustBegin() + tx.MustExec("INSERT INTO person (first_name, last_name, email) VALUES ($1, $2, $3)", "Jason", "Moiron", "jmoiron@jmoiron.net") + tx.MustExec("INSERT INTO person (first_name, last_name, email) VALUES ($1, $2, $3)", "John", "Doe", "johndoeDNE@gmail.net") + tx.MustExec("INSERT INTO place (country, city, telcode) VALUES ($1, $2, $3)", "United States", "New York", "1") + tx.MustExec("INSERT INTO place (country, telcode) VALUES ($1, $2)", "Hong Kong", "852") + tx.MustExec("INSERT INTO place (country, telcode) VALUES ($1, $2)", "Singapore", "65") + // Named queries can use structs, so if you have an existing struct (i.e. person := &Person{}) that you have populated, you can pass it in as &person + tx.NamedExec("INSERT INTO person (first_name, last_name, email) VALUES (:first_name, :last_name, :email)", &Person{"Jane", "Citizen", "jane.citzen@example.com"}) + tx.Commit() + + // Query the database, storing results in a []Person (wrapped in []interface{}) + people := []Person{} + db.Select(&people, "SELECT * FROM person ORDER BY first_name ASC") + jason, john := people[0], people[1] + + fmt.Printf("%#v\n%#v", jason, john) + // Person{FirstName:"Jason", LastName:"Moiron", Email:"jmoiron@jmoiron.net"} + // Person{FirstName:"John", LastName:"Doe", Email:"johndoeDNE@gmail.net"} + + // You can also get a single result, a la QueryRow + jason = Person{} + err = db.Get(&jason, "SELECT * FROM person WHERE first_name=$1", "Jason") + fmt.Printf("%#v\n", jason) + // Person{FirstName:"Jason", LastName:"Moiron", Email:"jmoiron@jmoiron.net"} + + // if you have null fields and use SELECT *, you must use sql.Null* in your struct + places := []Place{} + err = db.Select(&places, "SELECT * FROM place ORDER BY telcode ASC") + if err != nil { + fmt.Println(err) + return + } + usa, singsing, honkers := places[0], places[1], places[2] + + fmt.Printf("%#v\n%#v\n%#v\n", usa, singsing, honkers) + // Place{Country:"United States", City:sql.NullString{String:"New York", Valid:true}, TelCode:1} + // Place{Country:"Singapore", City:sql.NullString{String:"", Valid:false}, TelCode:65} + // Place{Country:"Hong Kong", City:sql.NullString{String:"", Valid:false}, TelCode:852} + + // Loop through rows using only one struct + place := Place{} + rows, err := db.Queryx("SELECT * FROM place") + for rows.Next() { + err := rows.StructScan(&place) + if err != nil { + log.Fatalln(err) + } + fmt.Printf("%#v\n", place) + } + // Place{Country:"United States", City:sql.NullString{String:"New York", Valid:true}, TelCode:1} + // Place{Country:"Hong Kong", City:sql.NullString{String:"", Valid:false}, TelCode:852} + // Place{Country:"Singapore", 
City:sql.NullString{String:"", Valid:false}, TelCode:65} + + // Named queries, using `:name` as the bindvar. Automatic bindvar support + // which takes into account the dbtype based on the driverName on sqlx.Open/Connect + _, err = db.NamedExec(`INSERT INTO person (first_name,last_name,email) VALUES (:first,:last,:email)`, + map[string]interface{}{ + "first": "Bin", + "last": "Smuth", + "email": "bensmith@allblacks.nz", + }) + + // Selects Mr. Smith from the database + rows, err = db.NamedQuery(`SELECT * FROM person WHERE first_name=:fn`, map[string]interface{}{"fn": "Bin"}) + + // Named queries can also use structs. Their bind names follow the same rules + // as the name -> db mapping, so struct fields are lowercased and the `db` tag + // is taken into consideration. + rows, err = db.NamedQuery(`SELECT * FROM person WHERE first_name=:first_name`, jason) + + + // batch insert + + // batch insert with structs + personStructs := []Person{ + {FirstName: "Ardie", LastName: "Savea", Email: "asavea@ab.co.nz"}, + {FirstName: "Sonny Bill", LastName: "Williams", Email: "sbw@ab.co.nz"}, + {FirstName: "Ngani", LastName: "Laumape", Email: "nlaumape@ab.co.nz"}, + } + + _, err = db.NamedExec(`INSERT INTO person (first_name, last_name, email) + VALUES (:first_name, :last_name, :email)`, personStructs) + + // batch insert with maps + personMaps := []map[string]interface{}{ + {"first_name": "Ardie", "last_name": "Savea", "email": "asavea@ab.co.nz"}, + {"first_name": "Sonny Bill", "last_name": "Williams", "email": "sbw@ab.co.nz"}, + {"first_name": "Ngani", "last_name": "Laumape", "email": "nlaumape@ab.co.nz"}, + } + + _, err = db.NamedExec(`INSERT INTO person (first_name, last_name, email) + VALUES (:first_name, :last_name, :email)`, personMaps) +} +``` diff --git a/vendor/github.com/jmoiron/sqlx/bind.go b/vendor/github.com/jmoiron/sqlx/bind.go new file mode 100644 index 00000000..ec0da4e7 --- /dev/null +++ b/vendor/github.com/jmoiron/sqlx/bind.go @@ -0,0 +1,265 @@ +package sqlx + +import ( + "bytes" + "database/sql/driver" + "errors" + "reflect" + "strconv" + "strings" + "sync" + + "github.com/jmoiron/sqlx/reflectx" +) + +// Bindvar types supported by Rebind, BindMap and BindStruct. +const ( + UNKNOWN = iota + QUESTION + DOLLAR + NAMED + AT +) + +var defaultBinds = map[int][]string{ + DOLLAR: []string{"postgres", "pgx", "pq-timeouts", "cloudsqlpostgres", "ql", "nrpostgres", "cockroach"}, + QUESTION: []string{"mysql", "sqlite3", "nrmysql", "nrsqlite3"}, + NAMED: []string{"oci8", "ora", "goracle", "godror"}, + AT: []string{"sqlserver"}, +} + +var binds sync.Map + +func init() { + for bind, drivers := range defaultBinds { + for _, driver := range drivers { + BindDriver(driver, bind) + } + } + +} + +// BindType returns the bindtype for a given database given a drivername. +func BindType(driverName string) int { + itype, ok := binds.Load(driverName) + if !ok { + return UNKNOWN + } + return itype.(int) +} + +// BindDriver sets the BindType for driverName to bindType. +func BindDriver(driverName string, bindType int) { + binds.Store(driverName, bindType) +} + +// FIXME: this should be able to be tolerant of escaped ?'s in queries without +// losing much speed, and should be to avoid confusion. + +// Rebind a query from the default bindtype (QUESTION) to the target bindtype. 
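+// For example, Rebind(DOLLAR, "SELECT * FROM t WHERE a=? AND b=?") returns
+// "SELECT * FROM t WHERE a=$1 AND b=$2".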
+func Rebind(bindType int, query string) string { + switch bindType { + case QUESTION, UNKNOWN: + return query + } + + // Add space enough for 10 params before we have to allocate + rqb := make([]byte, 0, len(query)+10) + + var i, j int + + for i = strings.Index(query, "?"); i != -1; i = strings.Index(query, "?") { + rqb = append(rqb, query[:i]...) + + switch bindType { + case DOLLAR: + rqb = append(rqb, '$') + case NAMED: + rqb = append(rqb, ':', 'a', 'r', 'g') + case AT: + rqb = append(rqb, '@', 'p') + } + + j++ + rqb = strconv.AppendInt(rqb, int64(j), 10) + + query = query[i+1:] + } + + return string(append(rqb, query...)) +} + +// Experimental implementation of Rebind which uses a bytes.Buffer. The code is +// much simpler and should be more resistant to odd unicode, but it is twice as +// slow. Kept here for benchmarking purposes and to possibly replace Rebind if +// problems arise with its somewhat naive handling of unicode. +func rebindBuff(bindType int, query string) string { + if bindType != DOLLAR { + return query + } + + b := make([]byte, 0, len(query)) + rqb := bytes.NewBuffer(b) + j := 1 + for _, r := range query { + if r == '?' { + rqb.WriteRune('$') + rqb.WriteString(strconv.Itoa(j)) + j++ + } else { + rqb.WriteRune(r) + } + } + + return rqb.String() +} + +func asSliceForIn(i interface{}) (v reflect.Value, ok bool) { + if i == nil { + return reflect.Value{}, false + } + + v = reflect.ValueOf(i) + t := reflectx.Deref(v.Type()) + + // Only expand slices + if t.Kind() != reflect.Slice { + return reflect.Value{}, false + } + + // []byte is a driver.Value type so it should not be expanded + if t == reflect.TypeOf([]byte{}) { + return reflect.Value{}, false + + } + + return v, true +} + +// In expands slice values in args, returning the modified query string +// and a new arg list that can be executed by a database. The `query` should +// use the `?` bindVar. The return value uses the `?` bindVar. +func In(query string, args ...interface{}) (string, []interface{}, error) { + // argMeta stores reflect.Value and length for slices and + // the value itself for non-slice arguments + type argMeta struct { + v reflect.Value + i interface{} + length int + } + + var flatArgsCount int + var anySlices bool + + var stackMeta [32]argMeta + + var meta []argMeta + if len(args) <= len(stackMeta) { + meta = stackMeta[:len(args)] + } else { + meta = make([]argMeta, len(args)) + } + + for i, arg := range args { + if a, ok := arg.(driver.Valuer); ok { + var err error + arg, err = a.Value() + if err != nil { + return "", nil, err + } + } + + if v, ok := asSliceForIn(arg); ok { + meta[i].length = v.Len() + meta[i].v = v + + anySlices = true + flatArgsCount += meta[i].length + + if meta[i].length == 0 { + return "", nil, errors.New("empty slice passed to 'in' query") + } + } else { + meta[i].i = arg + flatArgsCount++ + } + } + + // don't do any parsing if there aren't any slices; note that this means + // some errors that we might have caught below will not be returned. 
+ if !anySlices { + return query, args, nil + } + + newArgs := make([]interface{}, 0, flatArgsCount) + + var buf strings.Builder + buf.Grow(len(query) + len(", ?")*flatArgsCount) + + var arg, offset int + + for i := strings.IndexByte(query[offset:], '?'); i != -1; i = strings.IndexByte(query[offset:], '?') { + if arg >= len(meta) { + // if an argument wasn't passed, lets return an error; this is + // not actually how database/sql Exec/Query works, but since we are + // creating an argument list programmatically, we want to be able + // to catch these programmer errors earlier. + return "", nil, errors.New("number of bindVars exceeds arguments") + } + + argMeta := meta[arg] + arg++ + + // not a slice, continue. + // our questionmark will either be written before the next expansion + // of a slice or after the loop when writing the rest of the query + if argMeta.length == 0 { + offset = offset + i + 1 + newArgs = append(newArgs, argMeta.i) + continue + } + + // write everything up to and including our ? character + buf.WriteString(query[:offset+i+1]) + + for si := 1; si < argMeta.length; si++ { + buf.WriteString(", ?") + } + + newArgs = appendReflectSlice(newArgs, argMeta.v, argMeta.length) + + // slice the query and reset the offset. this avoids some bookkeeping for + // the write after the loop + query = query[offset+i+1:] + offset = 0 + } + + buf.WriteString(query) + + if arg < len(meta) { + return "", nil, errors.New("number of bindVars less than number arguments") + } + + return buf.String(), newArgs, nil +} + +func appendReflectSlice(args []interface{}, v reflect.Value, vlen int) []interface{} { + switch val := v.Interface().(type) { + case []interface{}: + args = append(args, val...) + case []int: + for i := range val { + args = append(args, val[i]) + } + case []string: + for i := range val { + args = append(args, val[i]) + } + default: + for si := 0; si < vlen; si++ { + args = append(args, v.Index(si).Interface()) + } + } + + return args +} diff --git a/vendor/github.com/jmoiron/sqlx/doc.go b/vendor/github.com/jmoiron/sqlx/doc.go new file mode 100644 index 00000000..e2b4e60b --- /dev/null +++ b/vendor/github.com/jmoiron/sqlx/doc.go @@ -0,0 +1,12 @@ +// Package sqlx provides general purpose extensions to database/sql. +// +// It is intended to seamlessly wrap database/sql and provide convenience +// methods which are useful in the development of database driven applications. +// None of the underlying database/sql methods are changed. Instead all extended +// behavior is implemented through new methods defined on wrapper types. +// +// Additions include scanning into structs, named query support, rebinding +// queries for different drivers, convenient shorthands for common error handling +// and more. 
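+//
+// A minimal sketch (the driver name and DSN are placeholders):
+//
+//	db, err := sqlx.Connect("postgres", "user=foo dbname=bar sslmode=disable")
+//	if err != nil {
+//		log.Fatalln(err)
+//	}
+//	var names []string
+//	err = db.Select(&names, "SELECT first_name FROM person")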
+// +package sqlx diff --git a/vendor/github.com/jmoiron/sqlx/named.go b/vendor/github.com/jmoiron/sqlx/named.go new file mode 100644 index 00000000..728aa04d --- /dev/null +++ b/vendor/github.com/jmoiron/sqlx/named.go @@ -0,0 +1,458 @@ +package sqlx + +// Named Query Support +// +// * BindMap - bind query bindvars to map/struct args +// * NamedExec, NamedQuery - named query w/ struct or map +// * NamedStmt - a pre-compiled named query which is a prepared statement +// +// Internal Interfaces: +// +// * compileNamedQuery - rebind a named query, returning a query and list of names +// * bindArgs, bindMapArgs, bindAnyArgs - given a list of names, return an arglist +// +import ( + "bytes" + "database/sql" + "errors" + "fmt" + "reflect" + "regexp" + "strconv" + "unicode" + + "github.com/jmoiron/sqlx/reflectx" +) + +// NamedStmt is a prepared statement that executes named queries. Prepare it +// how you would execute a NamedQuery, but pass in a struct or map when executing. +type NamedStmt struct { + Params []string + QueryString string + Stmt *Stmt +} + +// Close closes the named statement. +func (n *NamedStmt) Close() error { + return n.Stmt.Close() +} + +// Exec executes a named statement using the struct passed. +// Any named placeholder parameters are replaced with fields from arg. +func (n *NamedStmt) Exec(arg interface{}) (sql.Result, error) { + args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper) + if err != nil { + return *new(sql.Result), err + } + return n.Stmt.Exec(args...) +} + +// Query executes a named statement using the struct argument, returning rows. +// Any named placeholder parameters are replaced with fields from arg. +func (n *NamedStmt) Query(arg interface{}) (*sql.Rows, error) { + args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper) + if err != nil { + return nil, err + } + return n.Stmt.Query(args...) +} + +// QueryRow executes a named statement against the database. Because sqlx cannot +// create a *sql.Row with an error condition pre-set for binding errors, sqlx +// returns a *sqlx.Row instead. +// Any named placeholder parameters are replaced with fields from arg. +func (n *NamedStmt) QueryRow(arg interface{}) *Row { + args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper) + if err != nil { + return &Row{err: err} + } + return n.Stmt.QueryRowx(args...) +} + +// MustExec execs a NamedStmt, panicing on error +// Any named placeholder parameters are replaced with fields from arg. +func (n *NamedStmt) MustExec(arg interface{}) sql.Result { + res, err := n.Exec(arg) + if err != nil { + panic(err) + } + return res +} + +// Queryx using this NamedStmt +// Any named placeholder parameters are replaced with fields from arg. +func (n *NamedStmt) Queryx(arg interface{}) (*Rows, error) { + r, err := n.Query(arg) + if err != nil { + return nil, err + } + return &Rows{Rows: r, Mapper: n.Stmt.Mapper, unsafe: isUnsafe(n)}, err +} + +// QueryRowx this NamedStmt. Because of limitations with QueryRow, this is +// an alias for QueryRow. +// Any named placeholder parameters are replaced with fields from arg. +func (n *NamedStmt) QueryRowx(arg interface{}) *Row { + return n.QueryRow(arg) +} + +// Select using this NamedStmt +// Any named placeholder parameters are replaced with fields from arg. 
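+// For example (a sketch; assumes the statement was prepared with a :first_name
+// bindvar and a Person struct as in the package README):
+//
+//	var people []Person
+//	err := stmt.Select(&people, map[string]interface{}{"first_name": "Jason"})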
+func (n *NamedStmt) Select(dest interface{}, arg interface{}) error { + rows, err := n.Queryx(arg) + if err != nil { + return err + } + // if something happens here, we want to make sure the rows are Closed + defer rows.Close() + return scanAll(rows, dest, false) +} + +// Get using this NamedStmt +// Any named placeholder parameters are replaced with fields from arg. +func (n *NamedStmt) Get(dest interface{}, arg interface{}) error { + r := n.QueryRowx(arg) + return r.scanAny(dest, false) +} + +// Unsafe creates an unsafe version of the NamedStmt +func (n *NamedStmt) Unsafe() *NamedStmt { + r := &NamedStmt{Params: n.Params, Stmt: n.Stmt, QueryString: n.QueryString} + r.Stmt.unsafe = true + return r +} + +// A union interface of preparer and binder, required to be able to prepare +// named statements (as the bindtype must be determined). +type namedPreparer interface { + Preparer + binder +} + +func prepareNamed(p namedPreparer, query string) (*NamedStmt, error) { + bindType := BindType(p.DriverName()) + q, args, err := compileNamedQuery([]byte(query), bindType) + if err != nil { + return nil, err + } + stmt, err := Preparex(p, q) + if err != nil { + return nil, err + } + return &NamedStmt{ + QueryString: q, + Params: args, + Stmt: stmt, + }, nil +} + +// convertMapStringInterface attempts to convert v to map[string]interface{}. +// Unlike v.(map[string]interface{}), this function works on named types that +// are convertible to map[string]interface{} as well. +func convertMapStringInterface(v interface{}) (map[string]interface{}, bool) { + var m map[string]interface{} + mtype := reflect.TypeOf(m) + t := reflect.TypeOf(v) + if !t.ConvertibleTo(mtype) { + return nil, false + } + return reflect.ValueOf(v).Convert(mtype).Interface().(map[string]interface{}), true + +} + +func bindAnyArgs(names []string, arg interface{}, m *reflectx.Mapper) ([]interface{}, error) { + if maparg, ok := convertMapStringInterface(arg); ok { + return bindMapArgs(names, maparg) + } + return bindArgs(names, arg, m) +} + +// private interface to generate a list of interfaces from a given struct +// type, given a list of names to pull out of the struct. Used by public +// BindStruct interface. +func bindArgs(names []string, arg interface{}, m *reflectx.Mapper) ([]interface{}, error) { + arglist := make([]interface{}, 0, len(names)) + + // grab the indirected value of arg + v := reflect.ValueOf(arg) + for v = reflect.ValueOf(arg); v.Kind() == reflect.Ptr; { + v = v.Elem() + } + + err := m.TraversalsByNameFunc(v.Type(), names, func(i int, t []int) error { + if len(t) == 0 { + return fmt.Errorf("could not find name %s in %#v", names[i], arg) + } + + val := reflectx.FieldByIndexesReadOnly(v, t) + arglist = append(arglist, val.Interface()) + + return nil + }) + + return arglist, err +} + +// like bindArgs, but for maps. +func bindMapArgs(names []string, arg map[string]interface{}) ([]interface{}, error) { + arglist := make([]interface{}, 0, len(names)) + + for _, name := range names { + val, ok := arg[name] + if !ok { + return arglist, fmt.Errorf("could not find name %s in %#v", name, arg) + } + arglist = append(arglist, val) + } + return arglist, nil +} + +// bindStruct binds a named parameter query with fields from a struct argument. +// The rules for binding field names to parameter names follow the same +// conventions as for StructScan, including obeying the `db` struct tags. 
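+// For example (a sketch): with bindType QUESTION, the query
+//
+//	INSERT INTO person (first_name) VALUES (:first_name)
+//
+// is rebound to "INSERT INTO person (first_name) VALUES (?)", with the arg's
+// field tagged `db:"first_name"` as the single bound argument.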
+func bindStruct(bindType int, query string, arg interface{}, m *reflectx.Mapper) (string, []interface{}, error) { + bound, names, err := compileNamedQuery([]byte(query), bindType) + if err != nil { + return "", []interface{}{}, err + } + + arglist, err := bindAnyArgs(names, arg, m) + if err != nil { + return "", []interface{}{}, err + } + + return bound, arglist, nil +} + +var valuesReg = regexp.MustCompile(`\)\s*(?i)VALUES\s*\(`) + +func findMatchingClosingBracketIndex(s string) int { + count := 0 + for i, ch := range s { + if ch == '(' { + count++ + } + if ch == ')' { + count-- + if count == 0 { + return i + } + } + } + return 0 +} + +func fixBound(bound string, loop int) string { + loc := valuesReg.FindStringIndex(bound) + // defensive guard when "VALUES (...)" not found + if len(loc) < 2 { + return bound + } + + openingBracketIndex := loc[1] - 1 + index := findMatchingClosingBracketIndex(bound[openingBracketIndex:]) + // defensive guard. must have closing bracket + if index == 0 { + return bound + } + closingBracketIndex := openingBracketIndex + index + 1 + + var buffer bytes.Buffer + + buffer.WriteString(bound[0:closingBracketIndex]) + for i := 0; i < loop-1; i++ { + buffer.WriteString(",") + buffer.WriteString(bound[openingBracketIndex:closingBracketIndex]) + } + buffer.WriteString(bound[closingBracketIndex:]) + return buffer.String() +} + +// bindArray binds a named parameter query with fields from an array or slice of +// structs argument. +func bindArray(bindType int, query string, arg interface{}, m *reflectx.Mapper) (string, []interface{}, error) { + // do the initial binding with QUESTION; if bindType is not question, + // we can rebind it at the end. + bound, names, err := compileNamedQuery([]byte(query), QUESTION) + if err != nil { + return "", []interface{}{}, err + } + arrayValue := reflect.ValueOf(arg) + arrayLen := arrayValue.Len() + if arrayLen == 0 { + return "", []interface{}{}, fmt.Errorf("length of array is 0: %#v", arg) + } + var arglist = make([]interface{}, 0, len(names)*arrayLen) + for i := 0; i < arrayLen; i++ { + elemArglist, err := bindAnyArgs(names, arrayValue.Index(i).Interface(), m) + if err != nil { + return "", []interface{}{}, err + } + arglist = append(arglist, elemArglist...) + } + if arrayLen > 1 { + bound = fixBound(bound, arrayLen) + } + // adjust binding type if we weren't on question + if bindType != QUESTION { + bound = Rebind(bindType, bound) + } + return bound, arglist, nil +} + +// bindMap binds a named parameter query with a map of arguments. +func bindMap(bindType int, query string, args map[string]interface{}) (string, []interface{}, error) { + bound, names, err := compileNamedQuery([]byte(query), bindType) + if err != nil { + return "", []interface{}{}, err + } + + arglist, err := bindMapArgs(names, args) + return bound, arglist, err +} + +// -- Compilation of Named Queries + +// Allow digits and letters in bind params; additionally runes are +// checked against underscores, meaning that bind params can have be +// alphanumeric with underscores. Mind the difference between unicode +// digits and numbers, where '5' is a digit but '五' is not. +var allowedBindRunes = []*unicode.RangeTable{unicode.Letter, unicode.Digit} + +// FIXME: this function isn't safe for unicode named params, as a failing test +// can testify. This is not a regression but a failure of the original code +// as well. It should be modified to range over runes in a string rather than +// bytes, even though this is less convenient and slower. 
Hopefully the +// addition of the prepared NamedStmt (which will only do this once) will make +// up for the slightly slower ad-hoc NamedExec/NamedQuery. + +// compile a NamedQuery into an unbound query (using the '?' bindvar) and +// a list of names. +func compileNamedQuery(qs []byte, bindType int) (query string, names []string, err error) { + names = make([]string, 0, 10) + rebound := make([]byte, 0, len(qs)) + + inName := false + last := len(qs) - 1 + currentVar := 1 + name := make([]byte, 0, 10) + + for i, b := range qs { + // a ':' while we're in a name is an error + if b == ':' { + // if this is the second ':' in a '::' escape sequence, append a ':' + if inName && i > 0 && qs[i-1] == ':' { + rebound = append(rebound, ':') + inName = false + continue + } else if inName { + err = errors.New("unexpected `:` while reading named param at " + strconv.Itoa(i)) + return query, names, err + } + inName = true + name = []byte{} + } else if inName && i > 0 && b == '=' && len(name) == 0 { + rebound = append(rebound, ':', '=') + inName = false + continue + // if we're in a name, and this is an allowed character, continue + } else if inName && (unicode.IsOneOf(allowedBindRunes, rune(b)) || b == '_' || b == '.') && i != last { + // append the byte to the name if we are in a name and not on the last byte + name = append(name, b) + // if we're in a name and it's not an allowed character, the name is done + } else if inName { + inName = false + // if this is the final byte of the string and it is part of the name, then + // make sure to add it to the name + if i == last && unicode.IsOneOf(allowedBindRunes, rune(b)) { + name = append(name, b) + } + // add the string representation to the names list + names = append(names, string(name)) + // add a proper bindvar for the bindType + switch bindType { + // oracle only supports named type bind vars even for positional + case NAMED: + rebound = append(rebound, ':') + rebound = append(rebound, name...) + case QUESTION, UNKNOWN: + rebound = append(rebound, '?') + case DOLLAR: + rebound = append(rebound, '$') + for _, b := range strconv.Itoa(currentVar) { + rebound = append(rebound, byte(b)) + } + currentVar++ + case AT: + rebound = append(rebound, '@', 'p') + for _, b := range strconv.Itoa(currentVar) { + rebound = append(rebound, byte(b)) + } + currentVar++ + } + // add this byte to string unless it was not part of the name + if i != last { + rebound = append(rebound, b) + } else if !unicode.IsOneOf(allowedBindRunes, rune(b)) { + rebound = append(rebound, b) + } + } else { + // this is a normal byte and should just go onto the rebound query + rebound = append(rebound, b) + } + } + + return string(rebound), names, err +} + +// BindNamed binds a struct or a map to a query with named parameters. +// DEPRECATED: use sqlx.Named` instead of this, it may be removed in future. +func BindNamed(bindType int, query string, arg interface{}) (string, []interface{}, error) { + return bindNamedMapper(bindType, query, arg, mapper()) +} + +// Named takes a query using named parameters and an argument and +// returns a new query with a list of args that can be executed by +// a database. The return value uses the `?` bindvar. 
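+// For example:
+//
+//	q, args, err := Named("SELECT * FROM person WHERE first_name=:first",
+//		map[string]interface{}{"first": "Bin"})
+//	// q == "SELECT * FROM person WHERE first_name=?"
+//	// args == []interface{}{"Bin"}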
+func Named(query string, arg interface{}) (string, []interface{}, error) { + return bindNamedMapper(QUESTION, query, arg, mapper()) +} + +func bindNamedMapper(bindType int, query string, arg interface{}, m *reflectx.Mapper) (string, []interface{}, error) { + t := reflect.TypeOf(arg) + k := t.Kind() + switch { + case k == reflect.Map && t.Key().Kind() == reflect.String: + m, ok := convertMapStringInterface(arg) + if !ok { + return "", nil, fmt.Errorf("sqlx.bindNamedMapper: unsupported map type: %T", arg) + } + return bindMap(bindType, query, m) + case k == reflect.Array || k == reflect.Slice: + return bindArray(bindType, query, arg, m) + default: + return bindStruct(bindType, query, arg, m) + } +} + +// NamedQuery binds a named query and then runs Query on the result using the +// provided Ext (sqlx.Tx, sqlx.Db). It works with both structs and with +// map[string]interface{} types. +func NamedQuery(e Ext, query string, arg interface{}) (*Rows, error) { + q, args, err := bindNamedMapper(BindType(e.DriverName()), query, arg, mapperFor(e)) + if err != nil { + return nil, err + } + return e.Queryx(q, args...) +} + +// NamedExec uses BindStruct to get a query executable by the driver and +// then runs Exec on the result. Returns an error from the binding +// or the query execution itself. +func NamedExec(e Ext, query string, arg interface{}) (sql.Result, error) { + q, args, err := bindNamedMapper(BindType(e.DriverName()), query, arg, mapperFor(e)) + if err != nil { + return nil, err + } + return e.Exec(q, args...) +} diff --git a/vendor/github.com/jmoiron/sqlx/named_context.go b/vendor/github.com/jmoiron/sqlx/named_context.go new file mode 100644 index 00000000..07ad2165 --- /dev/null +++ b/vendor/github.com/jmoiron/sqlx/named_context.go @@ -0,0 +1,132 @@ +// +build go1.8 + +package sqlx + +import ( + "context" + "database/sql" +) + +// A union interface of contextPreparer and binder, required to be able to +// prepare named statements with context (as the bindtype must be determined). +type namedPreparerContext interface { + PreparerContext + binder +} + +func prepareNamedContext(ctx context.Context, p namedPreparerContext, query string) (*NamedStmt, error) { + bindType := BindType(p.DriverName()) + q, args, err := compileNamedQuery([]byte(query), bindType) + if err != nil { + return nil, err + } + stmt, err := PreparexContext(ctx, p, q) + if err != nil { + return nil, err + } + return &NamedStmt{ + QueryString: q, + Params: args, + Stmt: stmt, + }, nil +} + +// ExecContext executes a named statement using the struct passed. +// Any named placeholder parameters are replaced with fields from arg. +func (n *NamedStmt) ExecContext(ctx context.Context, arg interface{}) (sql.Result, error) { + args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper) + if err != nil { + return *new(sql.Result), err + } + return n.Stmt.ExecContext(ctx, args...) +} + +// QueryContext executes a named statement using the struct argument, returning rows. +// Any named placeholder parameters are replaced with fields from arg. +func (n *NamedStmt) QueryContext(ctx context.Context, arg interface{}) (*sql.Rows, error) { + args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper) + if err != nil { + return nil, err + } + return n.Stmt.QueryContext(ctx, args...) +} + +// QueryRowContext executes a named statement against the database. Because sqlx cannot +// create a *sql.Row with an error condition pre-set for binding errors, sqlx +// returns a *sqlx.Row instead. 
+// Any named placeholder parameters are replaced with fields from arg. +func (n *NamedStmt) QueryRowContext(ctx context.Context, arg interface{}) *Row { + args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper) + if err != nil { + return &Row{err: err} + } + return n.Stmt.QueryRowxContext(ctx, args...) +} + +// MustExecContext execs a NamedStmt, panicing on error +// Any named placeholder parameters are replaced with fields from arg. +func (n *NamedStmt) MustExecContext(ctx context.Context, arg interface{}) sql.Result { + res, err := n.ExecContext(ctx, arg) + if err != nil { + panic(err) + } + return res +} + +// QueryxContext using this NamedStmt +// Any named placeholder parameters are replaced with fields from arg. +func (n *NamedStmt) QueryxContext(ctx context.Context, arg interface{}) (*Rows, error) { + r, err := n.QueryContext(ctx, arg) + if err != nil { + return nil, err + } + return &Rows{Rows: r, Mapper: n.Stmt.Mapper, unsafe: isUnsafe(n)}, err +} + +// QueryRowxContext this NamedStmt. Because of limitations with QueryRow, this is +// an alias for QueryRow. +// Any named placeholder parameters are replaced with fields from arg. +func (n *NamedStmt) QueryRowxContext(ctx context.Context, arg interface{}) *Row { + return n.QueryRowContext(ctx, arg) +} + +// SelectContext using this NamedStmt +// Any named placeholder parameters are replaced with fields from arg. +func (n *NamedStmt) SelectContext(ctx context.Context, dest interface{}, arg interface{}) error { + rows, err := n.QueryxContext(ctx, arg) + if err != nil { + return err + } + // if something happens here, we want to make sure the rows are Closed + defer rows.Close() + return scanAll(rows, dest, false) +} + +// GetContext using this NamedStmt +// Any named placeholder parameters are replaced with fields from arg. +func (n *NamedStmt) GetContext(ctx context.Context, dest interface{}, arg interface{}) error { + r := n.QueryRowxContext(ctx, arg) + return r.scanAny(dest, false) +} + +// NamedQueryContext binds a named query and then runs Query on the result using the +// provided Ext (sqlx.Tx, sqlx.Db). It works with both structs and with +// map[string]interface{} types. +func NamedQueryContext(ctx context.Context, e ExtContext, query string, arg interface{}) (*Rows, error) { + q, args, err := bindNamedMapper(BindType(e.DriverName()), query, arg, mapperFor(e)) + if err != nil { + return nil, err + } + return e.QueryxContext(ctx, q, args...) +} + +// NamedExecContext uses BindStruct to get a query executable by the driver and +// then runs Exec on the result. Returns an error from the binding +// or the query execution itself. +func NamedExecContext(ctx context.Context, e ExtContext, query string, arg interface{}) (sql.Result, error) { + q, args, err := bindNamedMapper(BindType(e.DriverName()), query, arg, mapperFor(e)) + if err != nil { + return nil, err + } + return e.ExecContext(ctx, q, args...) +} diff --git a/vendor/github.com/jmoiron/sqlx/reflectx/README.md b/vendor/github.com/jmoiron/sqlx/reflectx/README.md new file mode 100644 index 00000000..f01d3d1f --- /dev/null +++ b/vendor/github.com/jmoiron/sqlx/reflectx/README.md @@ -0,0 +1,17 @@ +# reflectx + +The sqlx package has special reflect needs. In particular, it needs to: + +* be able to map a name to a field +* understand embedded structs +* understand mapping names to fields by a particular tag +* user specified name -> field mapping functions + +These behaviors mimic the behaviors by the standard library marshallers and also the +behavior of standard Go accessors. 
+
+The first two are amply taken care of by `reflect.Value.FieldByName`, and the third is
+addressed by `reflect.Value.FieldByNameFunc`, but these don't quite understand struct
+tags in the ways that are vital to most marshallers, and they are slow.
+
+This reflectx package extends reflect to achieve these goals.
diff --git a/vendor/github.com/jmoiron/sqlx/reflectx/reflect.go b/vendor/github.com/jmoiron/sqlx/reflectx/reflect.go
new file mode 100644
index 00000000..0b109942
--- /dev/null
+++ b/vendor/github.com/jmoiron/sqlx/reflectx/reflect.go
@@ -0,0 +1,444 @@
+// Package reflectx implements extensions to the standard reflect lib suitable
+// for implementing marshalling and unmarshalling packages. The main Mapper type
+// allows for Go-compatible named attribute access, including accessing embedded
+// struct attributes and the ability to use functions and struct tags to
+// customize field names.
+//
+package reflectx
+
+import (
+	"reflect"
+	"runtime"
+	"strings"
+	"sync"
+)
+
+// A FieldInfo is metadata for a struct field.
+type FieldInfo struct {
+	Index    []int
+	Path     string
+	Field    reflect.StructField
+	Zero     reflect.Value
+	Name     string
+	Options  map[string]string
+	Embedded bool
+	Children []*FieldInfo
+	Parent   *FieldInfo
+}
+
+// A StructMap is an index of field metadata for a struct.
+type StructMap struct {
+	Tree  *FieldInfo
+	Index []*FieldInfo
+	Paths map[string]*FieldInfo
+	Names map[string]*FieldInfo
+}
+
+// GetByPath returns a *FieldInfo for a given string path.
+func (f StructMap) GetByPath(path string) *FieldInfo {
+	return f.Paths[path]
+}
+
+// GetByTraversal returns a *FieldInfo for a given integer path. It is
+// analogous to reflect.FieldByIndex, but using the cached traversal
+// rather than re-executing the reflect machinery each time.
+func (f StructMap) GetByTraversal(index []int) *FieldInfo {
+	if len(index) == 0 {
+		return nil
+	}
+
+	tree := f.Tree
+	for _, i := range index {
+		if i >= len(tree.Children) || tree.Children[i] == nil {
+			return nil
+		}
+		tree = tree.Children[i]
+	}
+	return tree
+}
+
+// Mapper is a general purpose mapper of names to struct fields. A Mapper
+// behaves like most marshallers in the standard library, obeying a field tag
+// for name mapping but also providing a basic transform function.
+type Mapper struct {
+	cache      map[reflect.Type]*StructMap
+	tagName    string
+	tagMapFunc func(string) string
+	mapFunc    func(string) string
+	mutex      sync.Mutex
+}
+
+// NewMapper returns a new mapper using the tagName as its struct field tag.
+// If tagName is the empty string, it is ignored.
+func NewMapper(tagName string) *Mapper {
+	return &Mapper{
+		cache:   make(map[reflect.Type]*StructMap),
+		tagName: tagName,
+	}
+}
+
+// NewMapperTagFunc returns a new mapper which contains a mapper for field names
+// AND a mapper for tag values. This is useful for tags like json which can
+// have values like "name,omitempty".
+func NewMapperTagFunc(tagName string, mapFunc, tagMapFunc func(string) string) *Mapper {
+	return &Mapper{
+		cache:      make(map[reflect.Type]*StructMap),
+		tagName:    tagName,
+		mapFunc:    mapFunc,
+		tagMapFunc: tagMapFunc,
+	}
+}
+
+// NewMapperFunc returns a new mapper which optionally obeys a field tag and
+// a struct field name mapper func given by f.
Tags will take precedence, but +// for any other field, the mapped name will be f(field.Name) +func NewMapperFunc(tagName string, f func(string) string) *Mapper { + return &Mapper{ + cache: make(map[reflect.Type]*StructMap), + tagName: tagName, + mapFunc: f, + } +} + +// TypeMap returns a mapping of field strings to int slices representing +// the traversal down the struct to reach the field. +func (m *Mapper) TypeMap(t reflect.Type) *StructMap { + m.mutex.Lock() + mapping, ok := m.cache[t] + if !ok { + mapping = getMapping(t, m.tagName, m.mapFunc, m.tagMapFunc) + m.cache[t] = mapping + } + m.mutex.Unlock() + return mapping +} + +// FieldMap returns the mapper's mapping of field names to reflect values. Panics +// if v's Kind is not Struct, or v is not Indirectable to a struct kind. +func (m *Mapper) FieldMap(v reflect.Value) map[string]reflect.Value { + v = reflect.Indirect(v) + mustBe(v, reflect.Struct) + + r := map[string]reflect.Value{} + tm := m.TypeMap(v.Type()) + for tagName, fi := range tm.Names { + r[tagName] = FieldByIndexes(v, fi.Index) + } + return r +} + +// FieldByName returns a field by its mapped name as a reflect.Value. +// Panics if v's Kind is not Struct or v is not Indirectable to a struct Kind. +// Returns zero Value if the name is not found. +func (m *Mapper) FieldByName(v reflect.Value, name string) reflect.Value { + v = reflect.Indirect(v) + mustBe(v, reflect.Struct) + + tm := m.TypeMap(v.Type()) + fi, ok := tm.Names[name] + if !ok { + return v + } + return FieldByIndexes(v, fi.Index) +} + +// FieldsByName returns a slice of values corresponding to the slice of names +// for the value. Panics if v's Kind is not Struct or v is not Indirectable +// to a struct Kind. Returns zero Value for each name not found. +func (m *Mapper) FieldsByName(v reflect.Value, names []string) []reflect.Value { + v = reflect.Indirect(v) + mustBe(v, reflect.Struct) + + tm := m.TypeMap(v.Type()) + vals := make([]reflect.Value, 0, len(names)) + for _, name := range names { + fi, ok := tm.Names[name] + if !ok { + vals = append(vals, *new(reflect.Value)) + } else { + vals = append(vals, FieldByIndexes(v, fi.Index)) + } + } + return vals +} + +// TraversalsByName returns a slice of int slices which represent the struct +// traversals for each mapped name. Panics if t is not a struct or Indirectable +// to a struct. Returns empty int slice for each name not found. +func (m *Mapper) TraversalsByName(t reflect.Type, names []string) [][]int { + r := make([][]int, 0, len(names)) + m.TraversalsByNameFunc(t, names, func(_ int, i []int) error { + if i == nil { + r = append(r, []int{}) + } else { + r = append(r, i) + } + + return nil + }) + return r +} + +// TraversalsByNameFunc traverses the mapped names and calls fn with the index of +// each name and the struct traversal represented by that name. Panics if t is not +// a struct or Indirectable to a struct. Returns the first error returned by fn or nil. +func (m *Mapper) TraversalsByNameFunc(t reflect.Type, names []string, fn func(int, []int) error) error { + t = Deref(t) + mustBe(t, reflect.Struct) + tm := m.TypeMap(t) + for i, name := range names { + fi, ok := tm.Names[name] + if !ok { + if err := fn(i, nil); err != nil { + return err + } + } else { + if err := fn(i, fi.Index); err != nil { + return err + } + } + } + return nil +} + +// FieldByIndexes returns a value for the field given by the struct traversal +// for the given value. 
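+// For example, indexes []int{0, 1} selects v.Field(0).Field(1), allocating any
+// nil pointer (or nil map) encountered along the way so the result is settable.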
+func FieldByIndexes(v reflect.Value, indexes []int) reflect.Value { + for _, i := range indexes { + v = reflect.Indirect(v).Field(i) + // if this is a pointer and it's nil, allocate a new value and set it + if v.Kind() == reflect.Ptr && v.IsNil() { + alloc := reflect.New(Deref(v.Type())) + v.Set(alloc) + } + if v.Kind() == reflect.Map && v.IsNil() { + v.Set(reflect.MakeMap(v.Type())) + } + } + return v +} + +// FieldByIndexesReadOnly returns a value for a particular struct traversal, +// but is not concerned with allocating nil pointers because the value is +// going to be used for reading and not setting. +func FieldByIndexesReadOnly(v reflect.Value, indexes []int) reflect.Value { + for _, i := range indexes { + v = reflect.Indirect(v).Field(i) + } + return v +} + +// Deref is Indirect for reflect.Types +func Deref(t reflect.Type) reflect.Type { + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + return t +} + +// -- helpers & utilities -- + +type kinder interface { + Kind() reflect.Kind +} + +// mustBe checks a value against a kind, panicing with a reflect.ValueError +// if the kind isn't that which is required. +func mustBe(v kinder, expected reflect.Kind) { + if k := v.Kind(); k != expected { + panic(&reflect.ValueError{Method: methodName(), Kind: k}) + } +} + +// methodName returns the caller of the function calling methodName +func methodName() string { + pc, _, _, _ := runtime.Caller(2) + f := runtime.FuncForPC(pc) + if f == nil { + return "unknown method" + } + return f.Name() +} + +type typeQueue struct { + t reflect.Type + fi *FieldInfo + pp string // Parent path +} + +// A copying append that creates a new slice each time. +func apnd(is []int, i int) []int { + x := make([]int, len(is)+1) + copy(x, is) + x[len(x)-1] = i + return x +} + +type mapf func(string) string + +// parseName parses the tag and the target name for the given field using +// the tagName (eg 'json' for `json:"foo"` tags), mapFunc for mapping the +// field's name to a target name, and tagMapFunc for mapping the tag to +// a target name. +func parseName(field reflect.StructField, tagName string, mapFunc, tagMapFunc mapf) (tag, fieldName string) { + // first, set the fieldName to the field's name + fieldName = field.Name + // if a mapFunc is set, use that to override the fieldName + if mapFunc != nil { + fieldName = mapFunc(fieldName) + } + + // if there's no tag to look for, return the field name + if tagName == "" { + return "", fieldName + } + + // if this tag is not set using the normal convention in the tag, + // then return the fieldname.. this check is done because according + // to the reflect documentation: + // If the tag does not have the conventional format, + // the value returned by Get is unspecified. + // which doesn't sound great. 
+ if !strings.Contains(string(field.Tag), tagName+":") { + return "", fieldName + } + + // at this point we're fairly sure that we have a tag, so lets pull it out + tag = field.Tag.Get(tagName) + + // if we have a mapper function, call it on the whole tag + // XXX: this is a change from the old version, which pulled out the name + // before the tagMapFunc could be run, but I think this is the right way + if tagMapFunc != nil { + tag = tagMapFunc(tag) + } + + // finally, split the options from the name + parts := strings.Split(tag, ",") + fieldName = parts[0] + + return tag, fieldName +} + +// parseOptions parses options out of a tag string, skipping the name +func parseOptions(tag string) map[string]string { + parts := strings.Split(tag, ",") + options := make(map[string]string, len(parts)) + if len(parts) > 1 { + for _, opt := range parts[1:] { + // short circuit potentially expensive split op + if strings.Contains(opt, "=") { + kv := strings.Split(opt, "=") + options[kv[0]] = kv[1] + continue + } + options[opt] = "" + } + } + return options +} + +// getMapping returns a mapping for the t type, using the tagName, mapFunc and +// tagMapFunc to determine the canonical names of fields. +func getMapping(t reflect.Type, tagName string, mapFunc, tagMapFunc mapf) *StructMap { + m := []*FieldInfo{} + + root := &FieldInfo{} + queue := []typeQueue{} + queue = append(queue, typeQueue{Deref(t), root, ""}) + +QueueLoop: + for len(queue) != 0 { + // pop the first item off of the queue + tq := queue[0] + queue = queue[1:] + + // ignore recursive field + for p := tq.fi.Parent; p != nil; p = p.Parent { + if tq.fi.Field.Type == p.Field.Type { + continue QueueLoop + } + } + + nChildren := 0 + if tq.t.Kind() == reflect.Struct { + nChildren = tq.t.NumField() + } + tq.fi.Children = make([]*FieldInfo, nChildren) + + // iterate through all of its fields + for fieldPos := 0; fieldPos < nChildren; fieldPos++ { + + f := tq.t.Field(fieldPos) + + // parse the tag and the target name using the mapping options for this field + tag, name := parseName(f, tagName, mapFunc, tagMapFunc) + + // if the name is "-", disabled via a tag, skip it + if name == "-" { + continue + } + + fi := FieldInfo{ + Field: f, + Name: name, + Zero: reflect.New(f.Type).Elem(), + Options: parseOptions(tag), + } + + // if the path is empty this path is just the name + if tq.pp == "" { + fi.Path = fi.Name + } else { + fi.Path = tq.pp + "." 
+ fi.Name + } + + // skip unexported fields + if len(f.PkgPath) != 0 && !f.Anonymous { + continue + } + + // bfs search of anonymous embedded structs + if f.Anonymous { + pp := tq.pp + if tag != "" { + pp = fi.Path + } + + fi.Embedded = true + fi.Index = apnd(tq.fi.Index, fieldPos) + nChildren := 0 + ft := Deref(f.Type) + if ft.Kind() == reflect.Struct { + nChildren = ft.NumField() + } + fi.Children = make([]*FieldInfo, nChildren) + queue = append(queue, typeQueue{Deref(f.Type), &fi, pp}) + } else if fi.Zero.Kind() == reflect.Struct || (fi.Zero.Kind() == reflect.Ptr && fi.Zero.Type().Elem().Kind() == reflect.Struct) { + fi.Index = apnd(tq.fi.Index, fieldPos) + fi.Children = make([]*FieldInfo, Deref(f.Type).NumField()) + queue = append(queue, typeQueue{Deref(f.Type), &fi, fi.Path}) + } + + fi.Index = apnd(tq.fi.Index, fieldPos) + fi.Parent = tq.fi + tq.fi.Children[fieldPos] = &fi + m = append(m, &fi) + } + } + + flds := &StructMap{Index: m, Tree: root, Paths: map[string]*FieldInfo{}, Names: map[string]*FieldInfo{}} + for _, fi := range flds.Index { + // check if nothing has already been pushed with the same path + // sometimes you can choose to override a type using embedded struct + fld, ok := flds.Paths[fi.Path] + if !ok || fld.Embedded { + flds.Paths[fi.Path] = fi + if fi.Name != "" && !fi.Embedded { + flds.Names[fi.Path] = fi + } + } + } + + return flds +} diff --git a/vendor/github.com/jmoiron/sqlx/sqlx.go b/vendor/github.com/jmoiron/sqlx/sqlx.go new file mode 100644 index 00000000..f7b28768 --- /dev/null +++ b/vendor/github.com/jmoiron/sqlx/sqlx.go @@ -0,0 +1,1051 @@ +package sqlx + +import ( + "database/sql" + "database/sql/driver" + "errors" + "fmt" + + "io/ioutil" + "path/filepath" + "reflect" + "strings" + "sync" + + "github.com/jmoiron/sqlx/reflectx" +) + +// Although the NameMapper is convenient, in practice it should not +// be relied on except for application code. If you are writing a library +// that uses sqlx, you should be aware that the name mappings you expect +// can be overridden by your user's application. + +// NameMapper is used to map column names to struct field names. By default, +// it uses strings.ToLower to lowercase struct field names. It can be set +// to whatever you want, but it is encouraged to be set before sqlx is used +// as name-to-field mappings are cached after first use on a type. +var NameMapper = strings.ToLower +var origMapper = reflect.ValueOf(NameMapper) + +// Rather than creating on init, this is created when necessary so that +// importers have time to customize the NameMapper. +var mpr *reflectx.Mapper + +// mprMu protects mpr. +var mprMu sync.Mutex + +// mapper returns a valid mapper using the configured NameMapper func. +func mapper() *reflectx.Mapper { + mprMu.Lock() + defer mprMu.Unlock() + + if mpr == nil { + mpr = reflectx.NewMapperFunc("db", NameMapper) + } else if origMapper != reflect.ValueOf(NameMapper) { + // if NameMapper has changed, create a new mapper + mpr = reflectx.NewMapperFunc("db", NameMapper) + origMapper = reflect.ValueOf(NameMapper) + } + return mpr +} + +// isScannable takes the reflect.Type and the actual dest value and returns +// whether or not it's Scannable. 
Something is scannable if: +// * it is not a struct +// * it implements sql.Scanner +// * it has no exported fields +func isScannable(t reflect.Type) bool { + if reflect.PtrTo(t).Implements(_scannerInterface) { + return true + } + if t.Kind() != reflect.Struct { + return true + } + + // it's not important that we use the right mapper for this particular object, + // we're only concerned on how many exported fields this struct has + return len(mapper().TypeMap(t).Index) == 0 +} + +// ColScanner is an interface used by MapScan and SliceScan +type ColScanner interface { + Columns() ([]string, error) + Scan(dest ...interface{}) error + Err() error +} + +// Queryer is an interface used by Get and Select +type Queryer interface { + Query(query string, args ...interface{}) (*sql.Rows, error) + Queryx(query string, args ...interface{}) (*Rows, error) + QueryRowx(query string, args ...interface{}) *Row +} + +// Execer is an interface used by MustExec and LoadFile +type Execer interface { + Exec(query string, args ...interface{}) (sql.Result, error) +} + +// Binder is an interface for something which can bind queries (Tx, DB) +type binder interface { + DriverName() string + Rebind(string) string + BindNamed(string, interface{}) (string, []interface{}, error) +} + +// Ext is a union interface which can bind, query, and exec, used by +// NamedQuery and NamedExec. +type Ext interface { + binder + Queryer + Execer +} + +// Preparer is an interface used by Preparex. +type Preparer interface { + Prepare(query string) (*sql.Stmt, error) +} + +// determine if any of our extensions are unsafe +func isUnsafe(i interface{}) bool { + switch v := i.(type) { + case Row: + return v.unsafe + case *Row: + return v.unsafe + case Rows: + return v.unsafe + case *Rows: + return v.unsafe + case NamedStmt: + return v.Stmt.unsafe + case *NamedStmt: + return v.Stmt.unsafe + case Stmt: + return v.unsafe + case *Stmt: + return v.unsafe + case qStmt: + return v.unsafe + case *qStmt: + return v.unsafe + case DB: + return v.unsafe + case *DB: + return v.unsafe + case Tx: + return v.unsafe + case *Tx: + return v.unsafe + case sql.Rows, *sql.Rows: + return false + default: + return false + } +} + +func mapperFor(i interface{}) *reflectx.Mapper { + switch i := i.(type) { + case DB: + return i.Mapper + case *DB: + return i.Mapper + case Tx: + return i.Mapper + case *Tx: + return i.Mapper + default: + return mapper() + } +} + +var _scannerInterface = reflect.TypeOf((*sql.Scanner)(nil)).Elem() +var _valuerInterface = reflect.TypeOf((*driver.Valuer)(nil)).Elem() + +// Row is a reimplementation of sql.Row in order to gain access to the underlying +// sql.Rows.Columns() data, necessary for StructScan. +type Row struct { + err error + unsafe bool + rows *sql.Rows + Mapper *reflectx.Mapper +} + +// Scan is a fixed implementation of sql.Row.Scan, which does not discard the +// underlying error from the internal rows object if it exists. +func (r *Row) Scan(dest ...interface{}) error { + if r.err != nil { + return r.err + } + + // TODO(bradfitz): for now we need to defensively clone all + // []byte that the driver returned (not permitting + // *RawBytes in Rows.Scan), since we're about to close + // the Rows in our defer, when we return from this function. + // the contract with the driver.Next(...) interface is that it + // can return slices into read-only temporary memory that's + // only valid until the next Scan/Close. But the TODO is that + // for a lot of drivers, this copy will be unnecessary. 
We + // should provide an optional interface for drivers to + // implement to say, "don't worry, the []bytes that I return + // from Next will not be modified again." (for instance, if + // they were obtained from the network anyway) But for now we + // don't care. + defer r.rows.Close() + for _, dp := range dest { + if _, ok := dp.(*sql.RawBytes); ok { + return errors.New("sql: RawBytes isn't allowed on Row.Scan") + } + } + + if !r.rows.Next() { + if err := r.rows.Err(); err != nil { + return err + } + return sql.ErrNoRows + } + err := r.rows.Scan(dest...) + if err != nil { + return err + } + // Make sure the query can be processed to completion with no errors. + if err := r.rows.Close(); err != nil { + return err + } + return nil +} + +// Columns returns the underlying sql.Rows.Columns(), or the deferred error usually +// returned by Row.Scan() +func (r *Row) Columns() ([]string, error) { + if r.err != nil { + return []string{}, r.err + } + return r.rows.Columns() +} + +// ColumnTypes returns the underlying sql.Rows.ColumnTypes(), or the deferred error +func (r *Row) ColumnTypes() ([]*sql.ColumnType, error) { + if r.err != nil { + return []*sql.ColumnType{}, r.err + } + return r.rows.ColumnTypes() +} + +// Err returns the error encountered while scanning. +func (r *Row) Err() error { + return r.err +} + +// DB is a wrapper around sql.DB which keeps track of the driverName upon Open, +// used mostly to automatically bind named queries using the right bindvars. +type DB struct { + *sql.DB + driverName string + unsafe bool + Mapper *reflectx.Mapper +} + +// NewDb returns a new sqlx DB wrapper for a pre-existing *sql.DB. The +// driverName of the original database is required for named query support. +func NewDb(db *sql.DB, driverName string) *DB { + return &DB{DB: db, driverName: driverName, Mapper: mapper()} +} + +// DriverName returns the driverName passed to the Open function for this DB. +func (db *DB) DriverName() string { + return db.driverName +} + +// Open is the same as sql.Open, but returns an *sqlx.DB instead. +func Open(driverName, dataSourceName string) (*DB, error) { + db, err := sql.Open(driverName, dataSourceName) + if err != nil { + return nil, err + } + return &DB{DB: db, driverName: driverName, Mapper: mapper()}, err +} + +// MustOpen is the same as sql.Open, but returns an *sqlx.DB instead and panics on error. +func MustOpen(driverName, dataSourceName string) *DB { + db, err := Open(driverName, dataSourceName) + if err != nil { + panic(err) + } + return db +} + +// MapperFunc sets a new mapper for this db using the default sqlx struct tag +// and the provided mapper function. +func (db *DB) MapperFunc(mf func(string) string) { + db.Mapper = reflectx.NewMapperFunc("db", mf) +} + +// Rebind transforms a query from QUESTION to the DB driver's bindvar type. +func (db *DB) Rebind(query string) string { + return Rebind(BindType(db.driverName), query) +} + +// Unsafe returns a version of DB which will silently succeed to scan when +// columns in the SQL result have no fields in the destination struct. +// sqlx.Stmt and sqlx.Tx which are created from this DB will inherit its +// safety behavior. +func (db *DB) Unsafe() *DB { + return &DB{DB: db.DB, driverName: db.driverName, unsafe: true, Mapper: db.Mapper} +} + +// BindNamed binds a query using the DB driver's bindvar type. +func (db *DB) BindNamed(query string, arg interface{}) (string, []interface{}, error) { + return bindNamedMapper(BindType(db.driverName), query, arg, db.Mapper) +} + +// NamedQuery using this DB. 
+// Any named placeholder parameters are replaced with fields from arg. +func (db *DB) NamedQuery(query string, arg interface{}) (*Rows, error) { + return NamedQuery(db, query, arg) +} + +// NamedExec using this DB. +// Any named placeholder parameters are replaced with fields from arg. +func (db *DB) NamedExec(query string, arg interface{}) (sql.Result, error) { + return NamedExec(db, query, arg) +} + +// Select using this DB. +// Any placeholder parameters are replaced with supplied args. +func (db *DB) Select(dest interface{}, query string, args ...interface{}) error { + return Select(db, dest, query, args...) +} + +// Get using this DB. +// Any placeholder parameters are replaced with supplied args. +// An error is returned if the result set is empty. +func (db *DB) Get(dest interface{}, query string, args ...interface{}) error { + return Get(db, dest, query, args...) +} + +// MustBegin starts a transaction, and panics on error. Returns an *sqlx.Tx instead +// of an *sql.Tx. +func (db *DB) MustBegin() *Tx { + tx, err := db.Beginx() + if err != nil { + panic(err) + } + return tx +} + +// Beginx begins a transaction and returns an *sqlx.Tx instead of an *sql.Tx. +func (db *DB) Beginx() (*Tx, error) { + tx, err := db.DB.Begin() + if err != nil { + return nil, err + } + return &Tx{Tx: tx, driverName: db.driverName, unsafe: db.unsafe, Mapper: db.Mapper}, err +} + +// Queryx queries the database and returns an *sqlx.Rows. +// Any placeholder parameters are replaced with supplied args. +func (db *DB) Queryx(query string, args ...interface{}) (*Rows, error) { + r, err := db.DB.Query(query, args...) + if err != nil { + return nil, err + } + return &Rows{Rows: r, unsafe: db.unsafe, Mapper: db.Mapper}, err +} + +// QueryRowx queries the database and returns an *sqlx.Row. +// Any placeholder parameters are replaced with supplied args. +func (db *DB) QueryRowx(query string, args ...interface{}) *Row { + rows, err := db.DB.Query(query, args...) + return &Row{rows: rows, err: err, unsafe: db.unsafe, Mapper: db.Mapper} +} + +// MustExec (panic) runs MustExec using this database. +// Any placeholder parameters are replaced with supplied args. +func (db *DB) MustExec(query string, args ...interface{}) sql.Result { + return MustExec(db, query, args...) +} + +// Preparex returns an sqlx.Stmt instead of a sql.Stmt +func (db *DB) Preparex(query string) (*Stmt, error) { + return Preparex(db, query) +} + +// PrepareNamed returns an sqlx.NamedStmt +func (db *DB) PrepareNamed(query string) (*NamedStmt, error) { + return prepareNamed(db, query) +} + +// Conn is a wrapper around sql.Conn with extra functionality +type Conn struct { + *sql.Conn + driverName string + unsafe bool + Mapper *reflectx.Mapper +} + +// Tx is an sqlx wrapper around sql.Tx with extra functionality +type Tx struct { + *sql.Tx + driverName string + unsafe bool + Mapper *reflectx.Mapper +} + +// DriverName returns the driverName used by the DB which began this transaction. +func (tx *Tx) DriverName() string { + return tx.driverName +} + +// Rebind a query within a transaction's bindvar type. +func (tx *Tx) Rebind(query string) string { + return Rebind(BindType(tx.driverName), query) +} + +// Unsafe returns a version of Tx which will silently succeed to scan when +// columns in the SQL result have no fields in the destination struct. +func (tx *Tx) Unsafe() *Tx { + return &Tx{Tx: tx.Tx, driverName: tx.driverName, unsafe: true, Mapper: tx.Mapper} +} + +// BindNamed binds a query within a transaction's bindvar type. 
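+// For example (a sketch):
+//
+//	q, args, err := tx.BindNamed("SELECT * FROM person WHERE first_name=:fn",
+//		map[string]interface{}{"fn": "Bin"})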
+func (tx *Tx) BindNamed(query string, arg interface{}) (string, []interface{}, error) { + return bindNamedMapper(BindType(tx.driverName), query, arg, tx.Mapper) +} + +// NamedQuery within a transaction. +// Any named placeholder parameters are replaced with fields from arg. +func (tx *Tx) NamedQuery(query string, arg interface{}) (*Rows, error) { + return NamedQuery(tx, query, arg) +} + +// NamedExec a named query within a transaction. +// Any named placeholder parameters are replaced with fields from arg. +func (tx *Tx) NamedExec(query string, arg interface{}) (sql.Result, error) { + return NamedExec(tx, query, arg) +} + +// Select within a transaction. +// Any placeholder parameters are replaced with supplied args. +func (tx *Tx) Select(dest interface{}, query string, args ...interface{}) error { + return Select(tx, dest, query, args...) +} + +// Queryx within a transaction. +// Any placeholder parameters are replaced with supplied args. +func (tx *Tx) Queryx(query string, args ...interface{}) (*Rows, error) { + r, err := tx.Tx.Query(query, args...) + if err != nil { + return nil, err + } + return &Rows{Rows: r, unsafe: tx.unsafe, Mapper: tx.Mapper}, err +} + +// QueryRowx within a transaction. +// Any placeholder parameters are replaced with supplied args. +func (tx *Tx) QueryRowx(query string, args ...interface{}) *Row { + rows, err := tx.Tx.Query(query, args...) + return &Row{rows: rows, err: err, unsafe: tx.unsafe, Mapper: tx.Mapper} +} + +// Get within a transaction. +// Any placeholder parameters are replaced with supplied args. +// An error is returned if the result set is empty. +func (tx *Tx) Get(dest interface{}, query string, args ...interface{}) error { + return Get(tx, dest, query, args...) +} + +// MustExec runs MustExec within a transaction. +// Any placeholder parameters are replaced with supplied args. +func (tx *Tx) MustExec(query string, args ...interface{}) sql.Result { + return MustExec(tx, query, args...) +} + +// Preparex a statement within a transaction. +func (tx *Tx) Preparex(query string) (*Stmt, error) { + return Preparex(tx, query) +} + +// Stmtx returns a version of the prepared statement which runs within a transaction. Provided +// stmt can be either *sql.Stmt or *sqlx.Stmt. +func (tx *Tx) Stmtx(stmt interface{}) *Stmt { + var s *sql.Stmt + switch v := stmt.(type) { + case Stmt: + s = v.Stmt + case *Stmt: + s = v.Stmt + case *sql.Stmt: + s = v + default: + panic(fmt.Sprintf("non-statement type %v passed to Stmtx", reflect.ValueOf(stmt).Type())) + } + return &Stmt{Stmt: tx.Stmt(s), Mapper: tx.Mapper} +} + +// NamedStmt returns a version of the prepared statement which runs within a transaction. +func (tx *Tx) NamedStmt(stmt *NamedStmt) *NamedStmt { + return &NamedStmt{ + QueryString: stmt.QueryString, + Params: stmt.Params, + Stmt: tx.Stmtx(stmt.Stmt), + } +} + +// PrepareNamed returns an sqlx.NamedStmt +func (tx *Tx) PrepareNamed(query string) (*NamedStmt, error) { + return prepareNamed(tx, query) +} + +// Stmt is an sqlx wrapper around sql.Stmt with extra functionality +type Stmt struct { + *sql.Stmt + unsafe bool + Mapper *reflectx.Mapper +} + +// Unsafe returns a version of Stmt which will silently succeed to scan when +// columns in the SQL result have no fields in the destination struct. +func (s *Stmt) Unsafe() *Stmt { + return &Stmt{Stmt: s.Stmt, unsafe: true, Mapper: s.Mapper} +} + +// Select using the prepared statement. +// Any placeholder parameters are replaced with supplied args. 
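+//
+// A minimal sketch (assumes an open *sqlx.DB named db and a Person struct
+// matching the person table):
+//
+//	stmt, err := db.Preparex("SELECT * FROM person WHERE first_name = ?")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	var people []Person
+//	err = stmt.Select(&people, "Ben")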
+func (s *Stmt) Select(dest interface{}, args ...interface{}) error { + return Select(&qStmt{s}, dest, "", args...) +} + +// Get using the prepared statement. +// Any placeholder parameters are replaced with supplied args. +// An error is returned if the result set is empty. +func (s *Stmt) Get(dest interface{}, args ...interface{}) error { + return Get(&qStmt{s}, dest, "", args...) +} + +// MustExec (panic) using this statement. Note that the query portion of the error +// output will be blank, as Stmt does not expose its query. +// Any placeholder parameters are replaced with supplied args. +func (s *Stmt) MustExec(args ...interface{}) sql.Result { + return MustExec(&qStmt{s}, "", args...) +} + +// QueryRowx using this statement. +// Any placeholder parameters are replaced with supplied args. +func (s *Stmt) QueryRowx(args ...interface{}) *Row { + qs := &qStmt{s} + return qs.QueryRowx("", args...) +} + +// Queryx using this statement. +// Any placeholder parameters are replaced with supplied args. +func (s *Stmt) Queryx(args ...interface{}) (*Rows, error) { + qs := &qStmt{s} + return qs.Queryx("", args...) +} + +// qStmt is an unexposed wrapper which lets you use a Stmt as a Queryer & Execer by +// implementing those interfaces and ignoring the `query` argument. +type qStmt struct{ *Stmt } + +func (q *qStmt) Query(query string, args ...interface{}) (*sql.Rows, error) { + return q.Stmt.Query(args...) +} + +func (q *qStmt) Queryx(query string, args ...interface{}) (*Rows, error) { + r, err := q.Stmt.Query(args...) + if err != nil { + return nil, err + } + return &Rows{Rows: r, unsafe: q.Stmt.unsafe, Mapper: q.Stmt.Mapper}, err +} + +func (q *qStmt) QueryRowx(query string, args ...interface{}) *Row { + rows, err := q.Stmt.Query(args...) + return &Row{rows: rows, err: err, unsafe: q.Stmt.unsafe, Mapper: q.Stmt.Mapper} +} + +func (q *qStmt) Exec(query string, args ...interface{}) (sql.Result, error) { + return q.Stmt.Exec(args...) +} + +// Rows is a wrapper around sql.Rows which caches costly reflect operations +// during a looped StructScan +type Rows struct { + *sql.Rows + unsafe bool + Mapper *reflectx.Mapper + // these fields cache memory use for a rows during iteration w/ structScan + started bool + fields [][]int + values []interface{} +} + +// SliceScan using this Rows. +func (r *Rows) SliceScan() ([]interface{}, error) { + return SliceScan(r) +} + +// MapScan using this Rows. +func (r *Rows) MapScan(dest map[string]interface{}) error { + return MapScan(r, dest) +} + +// StructScan is like sql.Rows.Scan, but scans a single Row into a single Struct. +// Use this and iterate over Rows manually when the memory load of Select() might be +// prohibitive. *Rows.StructScan caches the reflect work of matching up column +// positions to fields to avoid that overhead per scan, which means it is not safe +// to run StructScan on the same Rows instance with different struct types. 
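+//
+// A minimal iteration sketch (assumes an open *sqlx.DB named db and a
+// matching Person struct):
+//
+//	rows, err := db.Queryx("SELECT * FROM person")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer rows.Close()
+//	for rows.Next() {
+//		var p Person
+//		if err := rows.StructScan(&p); err != nil {
+//			log.Fatal(err)
+//		}
+//	}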
+func (r *Rows) StructScan(dest interface{}) error { + v := reflect.ValueOf(dest) + + if v.Kind() != reflect.Ptr { + return errors.New("must pass a pointer, not a value, to StructScan destination") + } + + v = v.Elem() + + if !r.started { + columns, err := r.Columns() + if err != nil { + return err + } + m := r.Mapper + + r.fields = m.TraversalsByName(v.Type(), columns) + // if we are not unsafe and are missing fields, return an error + if f, err := missingFields(r.fields); err != nil && !r.unsafe { + return fmt.Errorf("missing destination name %s in %T", columns[f], dest) + } + r.values = make([]interface{}, len(columns)) + r.started = true + } + + err := fieldsByTraversal(v, r.fields, r.values, true) + if err != nil { + return err + } + // scan into the struct field pointers and append to our results + err = r.Scan(r.values...) + if err != nil { + return err + } + return r.Err() +} + +// Connect to a database and verify with a ping. +func Connect(driverName, dataSourceName string) (*DB, error) { + db, err := Open(driverName, dataSourceName) + if err != nil { + return nil, err + } + err = db.Ping() + if err != nil { + db.Close() + return nil, err + } + return db, nil +} + +// MustConnect connects to a database and panics on error. +func MustConnect(driverName, dataSourceName string) *DB { + db, err := Connect(driverName, dataSourceName) + if err != nil { + panic(err) + } + return db +} + +// Preparex prepares a statement. +func Preparex(p Preparer, query string) (*Stmt, error) { + s, err := p.Prepare(query) + if err != nil { + return nil, err + } + return &Stmt{Stmt: s, unsafe: isUnsafe(p), Mapper: mapperFor(p)}, err +} + +// Select executes a query using the provided Queryer, and StructScans each row +// into dest, which must be a slice. If the slice elements are scannable, then +// the result set must have only one column. Otherwise, StructScan is used. +// The *sql.Rows are closed automatically. +// Any placeholder parameters are replaced with supplied args. +func Select(q Queryer, dest interface{}, query string, args ...interface{}) error { + rows, err := q.Queryx(query, args...) + if err != nil { + return err + } + // if something happens here, we want to make sure the rows are Closed + defer rows.Close() + return scanAll(rows, dest, false) +} + +// Get does a QueryRow using the provided Queryer, and scans the resulting row +// to dest. If dest is scannable, the result must only have one column. Otherwise, +// StructScan is used. Get will return sql.ErrNoRows like row.Scan would. +// Any placeholder parameters are replaced with supplied args. +// An error is returned if the result set is empty. +func Get(q Queryer, dest interface{}, query string, args ...interface{}) error { + r := q.QueryRowx(query, args...) + return r.scanAny(dest, false) +} + +// LoadFile exec's every statement in a file (as a single call to Exec). +// LoadFile may return a nil *sql.Result if errors are encountered locating or +// reading the file at path. LoadFile reads the entire file into memory, so it +// is not suitable for loading large data dumps, but can be useful for initializing +// schemas or loading indexes. +// +// FIXME: this does not really work with multi-statement files for mattn/go-sqlite3 +// or the go-mysql-driver/mysql drivers; pq seems to be an exception here. Detecting +// this by requiring something with DriverName() and then attempting to split the +// queries will be difficult to get right, and its current driver-specific behavior +// is deemed at least not complex in its incorrectness. 
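+//
+// A minimal sketch (assumes an open *sqlx.DB named db and a schema.sql file
+// containing a single statement):
+//
+//	if _, err := sqlx.LoadFile(db, "schema.sql"); err != nil {
+//		log.Fatal(err)
+//	}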
+func LoadFile(e Execer, path string) (*sql.Result, error) {
+	realpath, err := filepath.Abs(path)
+	if err != nil {
+		return nil, err
+	}
+	contents, err := ioutil.ReadFile(realpath)
+	if err != nil {
+		return nil, err
+	}
+	res, err := e.Exec(string(contents))
+	return &res, err
+}
+
+// MustExec execs the query using e and panics if there was an error.
+// Any placeholder parameters are replaced with supplied args.
+func MustExec(e Execer, query string, args ...interface{}) sql.Result {
+	res, err := e.Exec(query, args...)
+	if err != nil {
+		panic(err)
+	}
+	return res
+}
+
+// SliceScan using this Row.
+func (r *Row) SliceScan() ([]interface{}, error) {
+	return SliceScan(r)
+}
+
+// MapScan using this Row.
+func (r *Row) MapScan(dest map[string]interface{}) error {
+	return MapScan(r, dest)
+}
+
+func (r *Row) scanAny(dest interface{}, structOnly bool) error {
+	if r.err != nil {
+		return r.err
+	}
+	if r.rows == nil {
+		r.err = sql.ErrNoRows
+		return r.err
+	}
+	defer r.rows.Close()
+
+	v := reflect.ValueOf(dest)
+	if v.Kind() != reflect.Ptr {
+		return errors.New("must pass a pointer, not a value, to StructScan destination")
+	}
+	if v.IsNil() {
+		return errors.New("nil pointer passed to StructScan destination")
+	}
+
+	base := reflectx.Deref(v.Type())
+	scannable := isScannable(base)
+
+	if structOnly && scannable {
+		return structOnlyError(base)
+	}
+
+	columns, err := r.Columns()
+	if err != nil {
+		return err
+	}
+
+	if scannable && len(columns) > 1 {
+		return fmt.Errorf("scannable dest type %s with >1 columns (%d) in result", base.Kind(), len(columns))
+	}
+
+	if scannable {
+		return r.Scan(dest)
+	}
+
+	m := r.Mapper
+
+	fields := m.TraversalsByName(v.Type(), columns)
+	// if we are not unsafe and are missing fields, return an error
+	if f, err := missingFields(fields); err != nil && !r.unsafe {
+		return fmt.Errorf("missing destination name %s in %T", columns[f], dest)
+	}
+	values := make([]interface{}, len(columns))
+
+	err = fieldsByTraversal(v, fields, values, true)
+	if err != nil {
+		return err
+	}
+	// scan into the struct field pointers
+	return r.Scan(values...)
+}
+
+// StructScan a single Row into dest.
+func (r *Row) StructScan(dest interface{}) error {
+	return r.scanAny(dest, true)
+}
+
+// SliceScan a row, returning a []interface{} with values similar to MapScan.
+// This function is primarily intended for use where the number of columns
+// is not known. Because you can pass an []interface{} directly to Scan,
+// it's recommended that you do that as it will not have to allocate new
+// slices per row.
+func SliceScan(r ColScanner) ([]interface{}, error) {
+	// ignore r.started, since we needn't use reflect for anything.
+	columns, err := r.Columns()
+	if err != nil {
+		return []interface{}{}, err
+	}
+
+	values := make([]interface{}, len(columns))
+	for i := range values {
+		values[i] = new(interface{})
+	}
+
+	err = r.Scan(values...)
+
+	if err != nil {
+		return values, err
+	}
+
+	for i := range columns {
+		values[i] = *(values[i].(*interface{}))
+	}
+
+	return values, r.Err()
+}
+
+// MapScan scans a single Row into the dest map[string]interface{}.
+// Use this to get results for SQL that might not be under your control
+// (for instance, if you're building an interface for an SQL server that
+// executes SQL from input). Please do not use this as a primary interface!
+// This will modify the map sent to it in place, so reuse the same map with
+// care. Columns which occur more than once in the result will overwrite
+// each other!
+func MapScan(r ColScanner, dest map[string]interface{}) error {
+	// ignore r.started, since we needn't use reflect for anything.
+	columns, err := r.Columns()
+	if err != nil {
+		return err
+	}
+
+	values := make([]interface{}, len(columns))
+	for i := range values {
+		values[i] = new(interface{})
+	}
+
+	err = r.Scan(values...)
+	if err != nil {
+		return err
+	}
+
+	for i, column := range columns {
+		dest[column] = *(values[i].(*interface{}))
+	}
+
+	return r.Err()
+}
+
+type rowsi interface {
+	Close() error
+	Columns() ([]string, error)
+	Err() error
+	Next() bool
+	Scan(...interface{}) error
+}
+
+// structOnlyError returns an error appropriate for type when a non-scannable
+// struct is expected but something else is given
+func structOnlyError(t reflect.Type) error {
+	isStruct := t.Kind() == reflect.Struct
+	isScanner := reflect.PtrTo(t).Implements(_scannerInterface)
+	if !isStruct {
+		return fmt.Errorf("expected %s but got %s", reflect.Struct, t.Kind())
+	}
+	if isScanner {
+		return fmt.Errorf("structscan expects a struct dest but the provided struct type %s implements scanner", t.Name())
+	}
+	return fmt.Errorf("expected a struct, but struct %s has no exported fields", t.Name())
+}
+
+// scanAll scans all rows into a destination, which must be a slice of any
+// type. It resets the slice length to zero before appending each element to
+// the slice. If the destination slice type is a Struct, then StructScan will
+// be used on each row. If the destination is some other kind of base type,
+// then each row must only have one column which can scan into that type. This
+// allows you to do something like:
+//
+//	rows, _ := db.Query("select id from people;")
+//	var ids []int
+//	scanAll(rows, &ids, false)
+//
+// and ids will be a list of the id results. I realize that this is a desirable
+// interface to expose to users, but for now it will only be exposed via changes
+// to `Get` and `Select`. The reason it has been implemented this way is that
+// it is the only way to avoid duplicating reflect work in the new API while
+// maintaining backwards compatibility.
+func scanAll(rows rowsi, dest interface{}, structOnly bool) error { + var v, vp reflect.Value + + value := reflect.ValueOf(dest) + + // json.Unmarshal returns errors for these + if value.Kind() != reflect.Ptr { + return errors.New("must pass a pointer, not a value, to StructScan destination") + } + if value.IsNil() { + return errors.New("nil pointer passed to StructScan destination") + } + direct := reflect.Indirect(value) + + slice, err := baseType(value.Type(), reflect.Slice) + if err != nil { + return err + } + direct.SetLen(0) + + isPtr := slice.Elem().Kind() == reflect.Ptr + base := reflectx.Deref(slice.Elem()) + scannable := isScannable(base) + + if structOnly && scannable { + return structOnlyError(base) + } + + columns, err := rows.Columns() + if err != nil { + return err + } + + // if it's a base type make sure it only has 1 column; if not return an error + if scannable && len(columns) > 1 { + return fmt.Errorf("non-struct dest type %s with >1 columns (%d)", base.Kind(), len(columns)) + } + + if !scannable { + var values []interface{} + var m *reflectx.Mapper + + switch rows.(type) { + case *Rows: + m = rows.(*Rows).Mapper + default: + m = mapper() + } + + fields := m.TraversalsByName(base, columns) + // if we are not unsafe and are missing fields, return an error + if f, err := missingFields(fields); err != nil && !isUnsafe(rows) { + return fmt.Errorf("missing destination name %s in %T", columns[f], dest) + } + values = make([]interface{}, len(columns)) + + for rows.Next() { + // create a new struct type (which returns PtrTo) and indirect it + vp = reflect.New(base) + v = reflect.Indirect(vp) + + err = fieldsByTraversal(v, fields, values, true) + if err != nil { + return err + } + + // scan into the struct field pointers and append to our results + err = rows.Scan(values...) + if err != nil { + return err + } + + if isPtr { + direct.Set(reflect.Append(direct, vp)) + } else { + direct.Set(reflect.Append(direct, v)) + } + } + } else { + for rows.Next() { + vp = reflect.New(base) + err = rows.Scan(vp.Interface()) + if err != nil { + return err + } + // append + if isPtr { + direct.Set(reflect.Append(direct, vp)) + } else { + direct.Set(reflect.Append(direct, reflect.Indirect(vp))) + } + } + } + + return rows.Err() +} + +// FIXME: StructScan was the very first bit of API in sqlx, and now unfortunately +// it doesn't really feel like it's named properly. There is an incongruency +// between this and the way that StructScan (which might better be ScanStruct +// anyway) works on a rows object. + +// StructScan all rows from an sql.Rows or an sqlx.Rows into the dest slice. +// StructScan will scan in the entire rows result, so if you do not want to +// allocate structs for the entire result, use Queryx and see sqlx.Rows.StructScan. +// If rows is sqlx.Rows, it will use its mapper, otherwise it will use the default. +func StructScan(rows rowsi, dest interface{}) error { + return scanAll(rows, dest, true) + +} + +// reflect helpers + +func baseType(t reflect.Type, expected reflect.Kind) (reflect.Type, error) { + t = reflectx.Deref(t) + if t.Kind() != expected { + return nil, fmt.Errorf("expected %s but got %s", expected, t.Kind()) + } + return t, nil +} + +// fieldsByName fills a values interface with fields from the passed value based +// on the traversals in int. If ptrs is true, return addresses instead of values. +// We write this instead of using FieldsByName to save allocations and map lookups +// when iterating over many rows. Empty traversals will get an interface pointer. 
+// Because of the necessity of requesting ptrs or values, it's considered a bit too +// specialized for inclusion in reflectx itself. +func fieldsByTraversal(v reflect.Value, traversals [][]int, values []interface{}, ptrs bool) error { + v = reflect.Indirect(v) + if v.Kind() != reflect.Struct { + return errors.New("argument not a struct") + } + + for i, traversal := range traversals { + if len(traversal) == 0 { + values[i] = new(interface{}) + continue + } + f := reflectx.FieldByIndexes(v, traversal) + if ptrs { + values[i] = f.Addr().Interface() + } else { + values[i] = f.Interface() + } + } + return nil +} + +func missingFields(transversals [][]int) (field int, err error) { + for i, t := range transversals { + if len(t) == 0 { + return i, errors.New("missing field") + } + } + return 0, nil +} diff --git a/vendor/github.com/jmoiron/sqlx/sqlx_context.go b/vendor/github.com/jmoiron/sqlx/sqlx_context.go new file mode 100644 index 00000000..7aa4dd01 --- /dev/null +++ b/vendor/github.com/jmoiron/sqlx/sqlx_context.go @@ -0,0 +1,414 @@ +// +build go1.8 + +package sqlx + +import ( + "context" + "database/sql" + "fmt" + "io/ioutil" + "path/filepath" + "reflect" +) + +// ConnectContext to a database and verify with a ping. +func ConnectContext(ctx context.Context, driverName, dataSourceName string) (*DB, error) { + db, err := Open(driverName, dataSourceName) + if err != nil { + return db, err + } + err = db.PingContext(ctx) + return db, err +} + +// QueryerContext is an interface used by GetContext and SelectContext +type QueryerContext interface { + QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error) + QueryxContext(ctx context.Context, query string, args ...interface{}) (*Rows, error) + QueryRowxContext(ctx context.Context, query string, args ...interface{}) *Row +} + +// PreparerContext is an interface used by PreparexContext. +type PreparerContext interface { + PrepareContext(ctx context.Context, query string) (*sql.Stmt, error) +} + +// ExecerContext is an interface used by MustExecContext and LoadFileContext +type ExecerContext interface { + ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error) +} + +// ExtContext is a union interface which can bind, query, and exec, with Context +// used by NamedQueryContext and NamedExecContext. +type ExtContext interface { + binder + QueryerContext + ExecerContext +} + +// SelectContext executes a query using the provided Queryer, and StructScans +// each row into dest, which must be a slice. If the slice elements are +// scannable, then the result set must have only one column. Otherwise, +// StructScan is used. The *sql.Rows are closed automatically. +// Any placeholder parameters are replaced with supplied args. +func SelectContext(ctx context.Context, q QueryerContext, dest interface{}, query string, args ...interface{}) error { + rows, err := q.QueryxContext(ctx, query, args...) + if err != nil { + return err + } + // if something happens here, we want to make sure the rows are Closed + defer rows.Close() + return scanAll(rows, dest, false) +} + +// PreparexContext prepares a statement. +// +// The provided context is used for the preparation of the statement, not for +// the execution of the statement. 
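+//
+// A minimal sketch (assumes an open *sqlx.DB named db):
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+//	defer cancel()
+//	stmt, err := sqlx.PreparexContext(ctx, db, "SELECT * FROM person WHERE id = ?")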
+func PreparexContext(ctx context.Context, p PreparerContext, query string) (*Stmt, error) { + s, err := p.PrepareContext(ctx, query) + if err != nil { + return nil, err + } + return &Stmt{Stmt: s, unsafe: isUnsafe(p), Mapper: mapperFor(p)}, err +} + +// GetContext does a QueryRow using the provided Queryer, and scans the +// resulting row to dest. If dest is scannable, the result must only have one +// column. Otherwise, StructScan is used. Get will return sql.ErrNoRows like +// row.Scan would. Any placeholder parameters are replaced with supplied args. +// An error is returned if the result set is empty. +func GetContext(ctx context.Context, q QueryerContext, dest interface{}, query string, args ...interface{}) error { + r := q.QueryRowxContext(ctx, query, args...) + return r.scanAny(dest, false) +} + +// LoadFileContext exec's every statement in a file (as a single call to Exec). +// LoadFileContext may return a nil *sql.Result if errors are encountered +// locating or reading the file at path. LoadFile reads the entire file into +// memory, so it is not suitable for loading large data dumps, but can be useful +// for initializing schemas or loading indexes. +// +// FIXME: this does not really work with multi-statement files for mattn/go-sqlite3 +// or the go-mysql-driver/mysql drivers; pq seems to be an exception here. Detecting +// this by requiring something with DriverName() and then attempting to split the +// queries will be difficult to get right, and its current driver-specific behavior +// is deemed at least not complex in its incorrectness. +func LoadFileContext(ctx context.Context, e ExecerContext, path string) (*sql.Result, error) { + realpath, err := filepath.Abs(path) + if err != nil { + return nil, err + } + contents, err := ioutil.ReadFile(realpath) + if err != nil { + return nil, err + } + res, err := e.ExecContext(ctx, string(contents)) + return &res, err +} + +// MustExecContext execs the query using e and panics if there was an error. +// Any placeholder parameters are replaced with supplied args. +func MustExecContext(ctx context.Context, e ExecerContext, query string, args ...interface{}) sql.Result { + res, err := e.ExecContext(ctx, query, args...) + if err != nil { + panic(err) + } + return res +} + +// PrepareNamedContext returns an sqlx.NamedStmt +func (db *DB) PrepareNamedContext(ctx context.Context, query string) (*NamedStmt, error) { + return prepareNamedContext(ctx, db, query) +} + +// NamedQueryContext using this DB. +// Any named placeholder parameters are replaced with fields from arg. +func (db *DB) NamedQueryContext(ctx context.Context, query string, arg interface{}) (*Rows, error) { + return NamedQueryContext(ctx, db, query, arg) +} + +// NamedExecContext using this DB. +// Any named placeholder parameters are replaced with fields from arg. +func (db *DB) NamedExecContext(ctx context.Context, query string, arg interface{}) (sql.Result, error) { + return NamedExecContext(ctx, db, query, arg) +} + +// SelectContext using this DB. +// Any placeholder parameters are replaced with supplied args. +func (db *DB) SelectContext(ctx context.Context, dest interface{}, query string, args ...interface{}) error { + return SelectContext(ctx, db, dest, query, args...) +} + +// GetContext using this DB. +// Any placeholder parameters are replaced with supplied args. +// An error is returned if the result set is empty. 
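+//
+// A minimal sketch (assumes a ctx context.Context and a Person struct
+// matching the person table):
+//
+//	var p Person
+//	err := db.GetContext(ctx, &p, "SELECT * FROM person WHERE id = ?", 1)
+//	if err == sql.ErrNoRows {
+//		// no matching row
+//	}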
+func (db *DB) GetContext(ctx context.Context, dest interface{}, query string, args ...interface{}) error { + return GetContext(ctx, db, dest, query, args...) +} + +// PreparexContext returns an sqlx.Stmt instead of a sql.Stmt. +// +// The provided context is used for the preparation of the statement, not for +// the execution of the statement. +func (db *DB) PreparexContext(ctx context.Context, query string) (*Stmt, error) { + return PreparexContext(ctx, db, query) +} + +// QueryxContext queries the database and returns an *sqlx.Rows. +// Any placeholder parameters are replaced with supplied args. +func (db *DB) QueryxContext(ctx context.Context, query string, args ...interface{}) (*Rows, error) { + r, err := db.DB.QueryContext(ctx, query, args...) + if err != nil { + return nil, err + } + return &Rows{Rows: r, unsafe: db.unsafe, Mapper: db.Mapper}, err +} + +// QueryRowxContext queries the database and returns an *sqlx.Row. +// Any placeholder parameters are replaced with supplied args. +func (db *DB) QueryRowxContext(ctx context.Context, query string, args ...interface{}) *Row { + rows, err := db.DB.QueryContext(ctx, query, args...) + return &Row{rows: rows, err: err, unsafe: db.unsafe, Mapper: db.Mapper} +} + +// MustBeginTx starts a transaction, and panics on error. Returns an *sqlx.Tx instead +// of an *sql.Tx. +// +// The provided context is used until the transaction is committed or rolled +// back. If the context is canceled, the sql package will roll back the +// transaction. Tx.Commit will return an error if the context provided to +// MustBeginContext is canceled. +func (db *DB) MustBeginTx(ctx context.Context, opts *sql.TxOptions) *Tx { + tx, err := db.BeginTxx(ctx, opts) + if err != nil { + panic(err) + } + return tx +} + +// MustExecContext (panic) runs MustExec using this database. +// Any placeholder parameters are replaced with supplied args. +func (db *DB) MustExecContext(ctx context.Context, query string, args ...interface{}) sql.Result { + return MustExecContext(ctx, db, query, args...) +} + +// BeginTxx begins a transaction and returns an *sqlx.Tx instead of an +// *sql.Tx. +// +// The provided context is used until the transaction is committed or rolled +// back. If the context is canceled, the sql package will roll back the +// transaction. Tx.Commit will return an error if the context provided to +// BeginxContext is canceled. +func (db *DB) BeginTxx(ctx context.Context, opts *sql.TxOptions) (*Tx, error) { + tx, err := db.DB.BeginTx(ctx, opts) + if err != nil { + return nil, err + } + return &Tx{Tx: tx, driverName: db.driverName, unsafe: db.unsafe, Mapper: db.Mapper}, err +} + +// Connx returns an *sqlx.Conn instead of an *sql.Conn. +func (db *DB) Connx(ctx context.Context) (*Conn, error) { + conn, err := db.DB.Conn(ctx) + if err != nil { + return nil, err + } + + return &Conn{Conn: conn, driverName: db.driverName, unsafe: db.unsafe, Mapper: db.Mapper}, nil +} + +// BeginTxx begins a transaction and returns an *sqlx.Tx instead of an +// *sql.Tx. +// +// The provided context is used until the transaction is committed or rolled +// back. If the context is canceled, the sql package will roll back the +// transaction. Tx.Commit will return an error if the context provided to +// BeginxContext is canceled. 
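+//
+// A minimal sketch (assumes a conn obtained from db.Connx):
+//
+//	tx, err := conn.BeginTxx(ctx, &sql.TxOptions{ReadOnly: true})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer tx.Rollback()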
+func (c *Conn) BeginTxx(ctx context.Context, opts *sql.TxOptions) (*Tx, error) { + tx, err := c.Conn.BeginTx(ctx, opts) + if err != nil { + return nil, err + } + return &Tx{Tx: tx, driverName: c.driverName, unsafe: c.unsafe, Mapper: c.Mapper}, err +} + +// SelectContext using this Conn. +// Any placeholder parameters are replaced with supplied args. +func (c *Conn) SelectContext(ctx context.Context, dest interface{}, query string, args ...interface{}) error { + return SelectContext(ctx, c, dest, query, args...) +} + +// GetContext using this Conn. +// Any placeholder parameters are replaced with supplied args. +// An error is returned if the result set is empty. +func (c *Conn) GetContext(ctx context.Context, dest interface{}, query string, args ...interface{}) error { + return GetContext(ctx, c, dest, query, args...) +} + +// PreparexContext returns an sqlx.Stmt instead of a sql.Stmt. +// +// The provided context is used for the preparation of the statement, not for +// the execution of the statement. +func (c *Conn) PreparexContext(ctx context.Context, query string) (*Stmt, error) { + return PreparexContext(ctx, c, query) +} + +// QueryxContext queries the database and returns an *sqlx.Rows. +// Any placeholder parameters are replaced with supplied args. +func (c *Conn) QueryxContext(ctx context.Context, query string, args ...interface{}) (*Rows, error) { + r, err := c.Conn.QueryContext(ctx, query, args...) + if err != nil { + return nil, err + } + return &Rows{Rows: r, unsafe: c.unsafe, Mapper: c.Mapper}, err +} + +// QueryRowxContext queries the database and returns an *sqlx.Row. +// Any placeholder parameters are replaced with supplied args. +func (c *Conn) QueryRowxContext(ctx context.Context, query string, args ...interface{}) *Row { + rows, err := c.Conn.QueryContext(ctx, query, args...) + return &Row{rows: rows, err: err, unsafe: c.unsafe, Mapper: c.Mapper} +} + +// Rebind a query within a Conn's bindvar type. +func (c *Conn) Rebind(query string) string { + return Rebind(BindType(c.driverName), query) +} + +// StmtxContext returns a version of the prepared statement which runs within a +// transaction. Provided stmt can be either *sql.Stmt or *sqlx.Stmt. +func (tx *Tx) StmtxContext(ctx context.Context, stmt interface{}) *Stmt { + var s *sql.Stmt + switch v := stmt.(type) { + case Stmt: + s = v.Stmt + case *Stmt: + s = v.Stmt + case *sql.Stmt: + s = v + default: + panic(fmt.Sprintf("non-statement type %v passed to Stmtx", reflect.ValueOf(stmt).Type())) + } + return &Stmt{Stmt: tx.StmtContext(ctx, s), Mapper: tx.Mapper} +} + +// NamedStmtContext returns a version of the prepared statement which runs +// within a transaction. +func (tx *Tx) NamedStmtContext(ctx context.Context, stmt *NamedStmt) *NamedStmt { + return &NamedStmt{ + QueryString: stmt.QueryString, + Params: stmt.Params, + Stmt: tx.StmtxContext(ctx, stmt.Stmt), + } +} + +// PreparexContext returns an sqlx.Stmt instead of a sql.Stmt. +// +// The provided context is used for the preparation of the statement, not for +// the execution of the statement. +func (tx *Tx) PreparexContext(ctx context.Context, query string) (*Stmt, error) { + return PreparexContext(ctx, tx, query) +} + +// PrepareNamedContext returns an sqlx.NamedStmt +func (tx *Tx) PrepareNamedContext(ctx context.Context, query string) (*NamedStmt, error) { + return prepareNamedContext(ctx, tx, query) +} + +// MustExecContext runs MustExecContext within a transaction. +// Any placeholder parameters are replaced with supplied args. 
+func (tx *Tx) MustExecContext(ctx context.Context, query string, args ...interface{}) sql.Result { + return MustExecContext(ctx, tx, query, args...) +} + +// QueryxContext within a transaction and context. +// Any placeholder parameters are replaced with supplied args. +func (tx *Tx) QueryxContext(ctx context.Context, query string, args ...interface{}) (*Rows, error) { + r, err := tx.Tx.QueryContext(ctx, query, args...) + if err != nil { + return nil, err + } + return &Rows{Rows: r, unsafe: tx.unsafe, Mapper: tx.Mapper}, err +} + +// SelectContext within a transaction and context. +// Any placeholder parameters are replaced with supplied args. +func (tx *Tx) SelectContext(ctx context.Context, dest interface{}, query string, args ...interface{}) error { + return SelectContext(ctx, tx, dest, query, args...) +} + +// GetContext within a transaction and context. +// Any placeholder parameters are replaced with supplied args. +// An error is returned if the result set is empty. +func (tx *Tx) GetContext(ctx context.Context, dest interface{}, query string, args ...interface{}) error { + return GetContext(ctx, tx, dest, query, args...) +} + +// QueryRowxContext within a transaction and context. +// Any placeholder parameters are replaced with supplied args. +func (tx *Tx) QueryRowxContext(ctx context.Context, query string, args ...interface{}) *Row { + rows, err := tx.Tx.QueryContext(ctx, query, args...) + return &Row{rows: rows, err: err, unsafe: tx.unsafe, Mapper: tx.Mapper} +} + +// NamedExecContext using this Tx. +// Any named placeholder parameters are replaced with fields from arg. +func (tx *Tx) NamedExecContext(ctx context.Context, query string, arg interface{}) (sql.Result, error) { + return NamedExecContext(ctx, tx, query, arg) +} + +// SelectContext using the prepared statement. +// Any placeholder parameters are replaced with supplied args. +func (s *Stmt) SelectContext(ctx context.Context, dest interface{}, args ...interface{}) error { + return SelectContext(ctx, &qStmt{s}, dest, "", args...) +} + +// GetContext using the prepared statement. +// Any placeholder parameters are replaced with supplied args. +// An error is returned if the result set is empty. +func (s *Stmt) GetContext(ctx context.Context, dest interface{}, args ...interface{}) error { + return GetContext(ctx, &qStmt{s}, dest, "", args...) +} + +// MustExecContext (panic) using this statement. Note that the query portion of +// the error output will be blank, as Stmt does not expose its query. +// Any placeholder parameters are replaced with supplied args. +func (s *Stmt) MustExecContext(ctx context.Context, args ...interface{}) sql.Result { + return MustExecContext(ctx, &qStmt{s}, "", args...) +} + +// QueryRowxContext using this statement. +// Any placeholder parameters are replaced with supplied args. +func (s *Stmt) QueryRowxContext(ctx context.Context, args ...interface{}) *Row { + qs := &qStmt{s} + return qs.QueryRowxContext(ctx, "", args...) +} + +// QueryxContext using this statement. +// Any placeholder parameters are replaced with supplied args. +func (s *Stmt) QueryxContext(ctx context.Context, args ...interface{}) (*Rows, error) { + qs := &qStmt{s} + return qs.QueryxContext(ctx, "", args...) +} + +func (q *qStmt) QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error) { + return q.Stmt.QueryContext(ctx, args...) +} + +func (q *qStmt) QueryxContext(ctx context.Context, query string, args ...interface{}) (*Rows, error) { + r, err := q.Stmt.QueryContext(ctx, args...) 
+	if err != nil {
+		return nil, err
+	}
+	return &Rows{Rows: r, unsafe: q.Stmt.unsafe, Mapper: q.Stmt.Mapper}, err
+}
+
+func (q *qStmt) QueryRowxContext(ctx context.Context, query string, args ...interface{}) *Row {
+	rows, err := q.Stmt.QueryContext(ctx, args...)
+	return &Row{rows: rows, err: err, unsafe: q.Stmt.unsafe, Mapper: q.Stmt.Mapper}
+}
+
+func (q *qStmt) ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error) {
+	return q.Stmt.ExecContext(ctx, args...)
+}
diff --git a/vendor/github.com/joho/godotenv/.gitignore b/vendor/github.com/joho/godotenv/.gitignore
new file mode 100644
index 00000000..e43b0f98
--- /dev/null
+++ b/vendor/github.com/joho/godotenv/.gitignore
@@ -0,0 +1 @@
+.DS_Store
diff --git a/vendor/github.com/joho/godotenv/LICENCE b/vendor/github.com/joho/godotenv/LICENCE
new file mode 100644
index 00000000..e7ddd51b
--- /dev/null
+++ b/vendor/github.com/joho/godotenv/LICENCE
@@ -0,0 +1,23 @@
+Copyright (c) 2013 John Barton
+
+MIT License
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
diff --git a/vendor/github.com/joho/godotenv/README.md b/vendor/github.com/joho/godotenv/README.md
new file mode 100644
index 00000000..1ec45b28
--- /dev/null
+++ b/vendor/github.com/joho/godotenv/README.md
@@ -0,0 +1,188 @@
+# GoDotEnv ![CI](https://github.com/joho/godotenv/workflows/CI/badge.svg) [![Go Report Card](https://goreportcard.com/badge/github.com/joho/godotenv)](https://goreportcard.com/report/github.com/joho/godotenv)
+
+A Go (golang) port of the Ruby dotenv project (which loads env vars from a .env file)
+
+From the original Library:
+
+> Storing configuration in the environment is one of the tenets of a twelve-factor app. Anything that is likely to change between deployment environments–such as resource handles for databases or credentials for external services–should be extracted from the code into environment variables.
+>
+> But it is not always practical to set environment variables on development machines or continuous integration servers where multiple projects are run. Dotenv loads variables from a .env file into ENV when the environment is bootstrapped.
+
+It can be used as a library (for loading in env for your own daemons etc) or as a bin command.
+
+There is test coverage and CI for both linuxish and windows environments, but I make no guarantees about the bin version working on windows.
+
+## Installation
+
+As a library
+
+```shell
+go get github.com/joho/godotenv
+```
+
+or if you want to use it as a bin command
+```shell
+go get github.com/joho/godotenv/cmd/godotenv
+```
+
+## Usage
+
+Add your application configuration to your `.env` file in the root of your project:
+
+```shell
+S3_BUCKET=YOURS3BUCKET
+SECRET_KEY=YOURSECRETKEYGOESHERE
+```
+
+Then in your Go app you can do something like
+
+```go
+package main
+
+import (
+    "github.com/joho/godotenv"
+    "log"
+    "os"
+)
+
+func main() {
+  err := godotenv.Load()
+  if err != nil {
+    log.Fatal("Error loading .env file")
+  }
+
+  s3Bucket := os.Getenv("S3_BUCKET")
+  secretKey := os.Getenv("SECRET_KEY")
+
+  // now do something with s3 or whatever
+}
+```
+
+If you're even lazier than that, you can just take advantage of the autoload package which will read in `.env` on import
+
+```go
+import _ "github.com/joho/godotenv/autoload"
+```
+
+While `.env` in the project root is the default, you don't have to be constrained, both examples below are 100% legit
+
+```go
+_ = godotenv.Load("somerandomfile")
+_ = godotenv.Load("filenumberone.env", "filenumbertwo.env")
+```
+
+If you want to be really fancy with your env file you can do comments and exports (below is a valid env file)
+
+```shell
+# I am a comment and that is OK
+SOME_VAR=someval
+FOO=BAR # comments at line end are OK too
+export BAR=BAZ
+```
+
+Or finally you can do YAML(ish) style
+
+```yaml
+FOO: bar
+BAR: baz
+```
+
+as a final aside, if you don't want godotenv munging your env you can just get a map back instead
+
+```go
+var myEnv map[string]string
+myEnv, err := godotenv.Read()
+
+s3Bucket := myEnv["S3_BUCKET"]
+```
+
+... or from an `io.Reader` instead of a local file
+
+```go
+reader := getRemoteFile()
+myEnv, err := godotenv.Parse(reader)
+```
+
+... or from a `string` if you so desire
+
+```go
+content := getRemoteFileContent()
+myEnv, err := godotenv.Unmarshal(content)
+```
+
+### Precedence & Conventions
+
+Existing envs take precedence over envs that are loaded later.
+
+The [convention](https://github.com/bkeepers/dotenv#what-other-env-files-can-i-use)
+for managing multiple environments (i.e. development, test, production)
+is to create an env named `{YOURAPP}_ENV` and load envs in this order:
+
+```go
+env := os.Getenv("FOO_ENV")
+if "" == env {
+	env = "development"
+}
+
+godotenv.Load(".env." + env + ".local")
+if "test" != env {
+	godotenv.Load(".env.local")
+}
+godotenv.Load(".env." + env)
+godotenv.Load() // The Original .env
+```
+
+If you need to, you can also use `godotenv.Overload()` to defy this convention
+and overwrite existing envs instead of only supplanting them. Use with caution.
+
+### Command Mode
+
+Assuming you've installed the command as above and you've got `$GOPATH/bin` in your `$PATH`
+
+```
+godotenv -f /some/path/to/.env some_command with some args
+```
+
+If you don't specify `-f` it will fall back on the default of loading `.env` in `PWD`
+
+### Writing Env Files
+
+Godotenv can also write a map representing the environment to a correctly-formatted and escaped file
+
+```go
+env, err := godotenv.Unmarshal("KEY=value")
+err := godotenv.Write(env, "./.env")
+```
+
+... or to a string
+
+```go
+env, err := godotenv.Unmarshal("KEY=value")
+content, err := godotenv.Marshal(env)
+```
+
+## Contributing
+
+Contributions are most welcome! The parser itself is pretty stupidly naive and I wouldn't be surprised if it breaks with edge cases.
+
+*code changes without tests will not be accepted*
+
+1. Fork it
+2. Create your feature branch (`git checkout -b my-new-feature`)
+3. Commit your changes (`git commit -am 'Added some feature'`)
+4. Push to the branch (`git push origin my-new-feature`)
+5. Create new Pull Request
+
+## Releases
+
+Releases should follow [Semver](http://semver.org/) though the first couple of releases are `v1` and `v1.1`.
+
+Use [annotated tags for all releases](https://github.com/joho/godotenv/issues/30). Example `git tag -a v1.2.1`
+
+## CI
+
+Linux: [![Build Status](https://travis-ci.org/joho/godotenv.svg?branch=master)](https://travis-ci.org/joho/godotenv) Windows: [![Build status](https://ci.appveyor.com/api/projects/status/9v40vnfvvgde64u4)](https://ci.appveyor.com/project/joho/godotenv)
+
+## Who?
+
+The original library [dotenv](https://github.com/bkeepers/dotenv) was written by [Brandon Keepers](http://opensoul.org/), and this port was done by [John Barton](https://johnbarton.co/) based off the tests/fixtures in the original library.
diff --git a/vendor/github.com/joho/godotenv/godotenv.go b/vendor/github.com/joho/godotenv/godotenv.go
new file mode 100644
index 00000000..466f2eb4
--- /dev/null
+++ b/vendor/github.com/joho/godotenv/godotenv.go
@@ -0,0 +1,363 @@
+// Package godotenv is a go port of the ruby dotenv library (https://github.com/bkeepers/dotenv)
+//
+// Examples/readme can be found on the github page at https://github.com/joho/godotenv
+//
+// The TL;DR is that you make a .env file that looks something like
+//
+//	SOME_ENV_VAR=somevalue
+//
+// and then in your go code you can call
+//
+//	godotenv.Load()
+//
+// and all the env vars declared in .env will be available through os.Getenv("SOME_ENV_VAR")
+package godotenv
+
+import (
+	"bufio"
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"os/exec"
+	"regexp"
+	"sort"
+	"strconv"
+	"strings"
+)
+
+const doubleQuoteSpecialChars = "\\\n\r\"!$`"
+
+// Load will read your env file(s) and load them into ENV for this process.
+//
+// Call this function as close as possible to the start of your program (ideally in main)
+//
+// If you call Load without any args it will default to loading .env in the current path
+//
+// You can otherwise tell it which files to load (there can be more than one) like
+//
+//	godotenv.Load("fileone", "filetwo")
+//
+// It's important to note that it WILL NOT OVERRIDE an env variable that already exists - consider the .env file to set dev vars or sensible defaults
+func Load(filenames ...string) (err error) {
+	filenames = filenamesOrDefault(filenames)
+
+	for _, filename := range filenames {
+		err = loadFile(filename, false)
+		if err != nil {
+			return // return early on a spazout
+		}
+	}
+	return
+}
+
+// Overload will read your env file(s) and load them into ENV for this process.
+//
+// Call this function as close as possible to the start of your program (ideally in main)
+//
+// If you call Overload without any args it will default to loading .env in the current path
+//
+// You can otherwise tell it which files to load (there can be more than one) like
+//
+//	godotenv.Overload("fileone", "filetwo")
+//
+// It's important to note this WILL OVERRIDE an env variable that already exists - consider the .env file to forcefully set all vars.
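+//
+// A minimal sketch (assumes a .env.production file whose values should win
+// over anything already set in the environment):
+//
+//	if err := godotenv.Overload(".env.production"); err != nil {
+//		log.Fatal(err)
+//	}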
+func Overload(filenames ...string) (err error) { + filenames = filenamesOrDefault(filenames) + + for _, filename := range filenames { + err = loadFile(filename, true) + if err != nil { + return // return early on a spazout + } + } + return +} + +// Read all env (with same file loading semantics as Load) but return values as +// a map rather than automatically writing values into env +func Read(filenames ...string) (envMap map[string]string, err error) { + filenames = filenamesOrDefault(filenames) + envMap = make(map[string]string) + + for _, filename := range filenames { + individualEnvMap, individualErr := readFile(filename) + + if individualErr != nil { + err = individualErr + return // return early on a spazout + } + + for key, value := range individualEnvMap { + envMap[key] = value + } + } + + return +} + +// Parse reads an env file from io.Reader, returning a map of keys and values. +func Parse(r io.Reader) (envMap map[string]string, err error) { + envMap = make(map[string]string) + + var lines []string + scanner := bufio.NewScanner(r) + for scanner.Scan() { + lines = append(lines, scanner.Text()) + } + + if err = scanner.Err(); err != nil { + return + } + + for _, fullLine := range lines { + if !isIgnoredLine(fullLine) { + var key, value string + key, value, err = parseLine(fullLine, envMap) + + if err != nil { + return + } + envMap[key] = value + } + } + return +} + +//Unmarshal reads an env file from a string, returning a map of keys and values. +func Unmarshal(str string) (envMap map[string]string, err error) { + return Parse(strings.NewReader(str)) +} + +// Exec loads env vars from the specified filenames (empty map falls back to default) +// then executes the cmd specified. +// +// Simply hooks up os.Stdin/err/out to the command and calls Run() +// +// If you want more fine grained control over your command it's recommended +// that you use `Load()` or `Read()` and the `os/exec` package yourself. +func Exec(filenames []string, cmd string, cmdArgs []string) error { + Load(filenames...) + + command := exec.Command(cmd, cmdArgs...) + command.Stdin = os.Stdin + command.Stdout = os.Stdout + command.Stderr = os.Stderr + return command.Run() +} + +// Write serializes the given environment and writes it to a file +func Write(envMap map[string]string, filename string) error { + content, err := Marshal(envMap) + if err != nil { + return err + } + file, err := os.Create(filename) + if err != nil { + return err + } + defer file.Close() + _, err = file.WriteString(content + "\n") + if err != nil { + return err + } + file.Sync() + return err +} + +// Marshal outputs the given environment as a dotenv-formatted environment file. +// Each line is in the format: KEY="VALUE" where VALUE is backslash-escaped. 
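+//
+// A minimal sketch (values that parse as integers are written unquoted):
+//
+//	s, _ := godotenv.Marshal(map[string]string{"KEY": "some value", "PORT": "8080"})
+//	// s == "KEY=\"some value\"\nPORT=8080"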
+func Marshal(envMap map[string]string) (string, error) { + lines := make([]string, 0, len(envMap)) + for k, v := range envMap { + if d, err := strconv.Atoi(v); err == nil { + lines = append(lines, fmt.Sprintf(`%s=%d`, k, d)) + } else { + lines = append(lines, fmt.Sprintf(`%s="%s"`, k, doubleQuoteEscape(v))) + } + } + sort.Strings(lines) + return strings.Join(lines, "\n"), nil +} + +func filenamesOrDefault(filenames []string) []string { + if len(filenames) == 0 { + return []string{".env"} + } + return filenames +} + +func loadFile(filename string, overload bool) error { + envMap, err := readFile(filename) + if err != nil { + return err + } + + currentEnv := map[string]bool{} + rawEnv := os.Environ() + for _, rawEnvLine := range rawEnv { + key := strings.Split(rawEnvLine, "=")[0] + currentEnv[key] = true + } + + for key, value := range envMap { + if !currentEnv[key] || overload { + os.Setenv(key, value) + } + } + + return nil +} + +func readFile(filename string) (envMap map[string]string, err error) { + file, err := os.Open(filename) + if err != nil { + return + } + defer file.Close() + + return Parse(file) +} + +var exportRegex = regexp.MustCompile(`^\s*(?:export\s+)?(.*?)\s*$`) + +func parseLine(line string, envMap map[string]string) (key string, value string, err error) { + if len(line) == 0 { + err = errors.New("zero length string") + return + } + + // ditch the comments (but keep quoted hashes) + if strings.Contains(line, "#") { + segmentsBetweenHashes := strings.Split(line, "#") + quotesAreOpen := false + var segmentsToKeep []string + for _, segment := range segmentsBetweenHashes { + if strings.Count(segment, "\"") == 1 || strings.Count(segment, "'") == 1 { + if quotesAreOpen { + quotesAreOpen = false + segmentsToKeep = append(segmentsToKeep, segment) + } else { + quotesAreOpen = true + } + } + + if len(segmentsToKeep) == 0 || quotesAreOpen { + segmentsToKeep = append(segmentsToKeep, segment) + } + } + + line = strings.Join(segmentsToKeep, "#") + } + + firstEquals := strings.Index(line, "=") + firstColon := strings.Index(line, ":") + splitString := strings.SplitN(line, "=", 2) + if firstColon != -1 && (firstColon < firstEquals || firstEquals == -1) { + //this is a yaml-style line + splitString = strings.SplitN(line, ":", 2) + } + + if len(splitString) != 2 { + err = errors.New("Can't separate key from value") + return + } + + // Parse the key + key = splitString[0] + if strings.HasPrefix(key, "export") { + key = strings.TrimPrefix(key, "export") + } + key = strings.TrimSpace(key) + + key = exportRegex.ReplaceAllString(splitString[0], "$1") + + // Parse the value + value = parseValue(splitString[1], envMap) + return +} + +var ( + singleQuotesRegex = regexp.MustCompile(`\A'(.*)'\z`) + doubleQuotesRegex = regexp.MustCompile(`\A"(.*)"\z`) + escapeRegex = regexp.MustCompile(`\\.`) + unescapeCharsRegex = regexp.MustCompile(`\\([^$])`) +) + +func parseValue(value string, envMap map[string]string) string { + + // trim + value = strings.Trim(value, " ") + + // check if we've got quoted values or possible escapes + if len(value) > 1 { + singleQuotes := singleQuotesRegex.FindStringSubmatch(value) + + doubleQuotes := doubleQuotesRegex.FindStringSubmatch(value) + + if singleQuotes != nil || doubleQuotes != nil { + // pull the quotes off the edges + value = value[1 : len(value)-1] + } + + if doubleQuotes != nil { + // expand newlines + value = escapeRegex.ReplaceAllStringFunc(value, func(match string) string { + c := strings.TrimPrefix(match, `\`) + switch c { + case "n": + return "\n" + case "r": 
+					return "\r"
+				default:
+					return match
+				}
+			})
+			// unescape characters
+			value = unescapeCharsRegex.ReplaceAllString(value, "$1")
+		}
+
+		if singleQuotes == nil {
+			value = expandVariables(value, envMap)
+		}
+	}
+
+	return value
+}
+
+var expandVarRegex = regexp.MustCompile(`(\\)?(\$)(\()?\{?([A-Z0-9_]+)?\}?`)
+
+func expandVariables(v string, m map[string]string) string {
+	return expandVarRegex.ReplaceAllStringFunc(v, func(s string) string {
+		submatch := expandVarRegex.FindStringSubmatch(s)
+
+		if submatch == nil {
+			return s
+		}
+		if submatch[1] == "\\" || submatch[2] == "(" {
+			return submatch[0][1:]
+		} else if submatch[4] != "" {
+			return m[submatch[4]]
+		}
+		return s
+	})
+}
+
+func isIgnoredLine(line string) bool {
+	trimmedLine := strings.TrimSpace(line)
+	return len(trimmedLine) == 0 || strings.HasPrefix(trimmedLine, "#")
+}
+
+func doubleQuoteEscape(line string) string {
+	for _, c := range doubleQuoteSpecialChars {
+		toReplace := "\\" + string(c)
+		if c == '\n' {
+			toReplace = `\n`
+		}
+		if c == '\r' {
+			toReplace = `\r`
+		}
+		line = strings.Replace(line, string(c), toReplace, -1)
+	}
+	return line
+}
diff --git a/vendor/github.com/joho/godotenv/renovate.json b/vendor/github.com/joho/godotenv/renovate.json
new file mode 100644
index 00000000..f45d8f11
--- /dev/null
+++ b/vendor/github.com/joho/godotenv/renovate.json
@@ -0,0 +1,5 @@
+{
+  "extends": [
+    "config:base"
+  ]
+}
diff --git a/vendor/github.com/lib/pq/conn.go b/vendor/github.com/lib/pq/conn.go
index e050d535..7d83f672 100644
--- a/vendor/github.com/lib/pq/conn.go
+++ b/vendor/github.com/lib/pq/conn.go
@@ -31,8 +31,10 @@ var (
 	ErrNotSupported              = errors.New("pq: Unsupported command")
 	ErrInFailedTransaction       = errors.New("pq: Could not complete operation in a failed transaction")
 	ErrSSLNotSupported           = errors.New("pq: SSL is not enabled on the server")
-	ErrSSLKeyHasWorldPermissions = errors.New("pq: Private key file has group or world access. Permissions should be u=rw (0600) or less")
-	ErrCouldNotDetectUsername    = errors.New("pq: Could not detect default username. Please provide one explicitly")
+	ErrSSLKeyUnknownOwnership    = errors.New("pq: Could not get owner information for private key, may not be properly protected")
+	ErrSSLKeyHasWorldPermissions = errors.New("pq: Private key has world access. Permissions should be u=rw,g=r (0640) if owned by root, or u=rw (0600), or less")
+
+	ErrCouldNotDetectUsername = errors.New("pq: Could not detect default username. Please provide one explicitly")
 
 	errUnexpectedReady = errors.New("unexpected ReadyForQuery")
 	errNoRowsAffected  = errors.New("no RowsAffected available after the empty statement")
@@ -322,7 +324,7 @@ func DialOpen(d Dialer, dsn string) (_ driver.Conn, err error) {
 	if err != nil {
 		return nil, err
 	}
-	c.dialer = d
+	c.Dialer(d)
 	return c.open(context.Background())
 }
 
diff --git a/vendor/github.com/lib/pq/connector.go b/vendor/github.com/lib/pq/connector.go
index d7d47261..1145e122 100644
--- a/vendor/github.com/lib/pq/connector.go
+++ b/vendor/github.com/lib/pq/connector.go
@@ -27,6 +27,11 @@ func (c *Connector) Connect(ctx context.Context) (driver.Conn, error) {
 	return c.open(ctx)
 }
 
+// Dialer allows changing the dialer used to open connections.
+func (c *Connector) Dialer(dialer Dialer) {
+	c.dialer = dialer
+}
+
 // Driver returns the underlying driver of this Connector.
func (c *Connector) Driver() driver.Driver { return &Driver{} diff --git a/vendor/github.com/lib/pq/copy.go b/vendor/github.com/lib/pq/copy.go index c072bc3b..2f5c1ec8 100644 --- a/vendor/github.com/lib/pq/copy.go +++ b/vendor/github.com/lib/pq/copy.go @@ -1,6 +1,7 @@ package pq import ( + "context" "database/sql/driver" "encoding/binary" "errors" @@ -273,6 +274,43 @@ func (ci *copyin) Exec(v []driver.Value) (r driver.Result, err error) { return driver.RowsAffected(0), nil } +// CopyData inserts a raw string into the COPY stream. The insert is +// asynchronous and CopyData can return errors from previous CopyData calls to +// the same COPY stmt. +// +// You need to call Exec(nil) to sync the COPY stream and to get any +// errors from pending data, since Stmt.Close() doesn't return errors +// to the user. +func (ci *copyin) CopyData(ctx context.Context, line string) (r driver.Result, err error) { + if ci.closed { + return nil, errCopyInClosed + } + + if finish := ci.cn.watchCancel(ctx); finish != nil { + defer finish() + } + + if err := ci.getBad(); err != nil { + return nil, err + } + defer ci.cn.errRecover(&err) + + if err := ci.err(); err != nil { + return nil, err + } + + ci.buffer = append(ci.buffer, []byte(line)...) + ci.buffer = append(ci.buffer, '\n') + + if len(ci.buffer) > ciBufferFlushSize { + ci.flush(ci.buffer) + // reset buffer, keep bytes for message identifier and length + ci.buffer = ci.buffer[:5] + } + + return driver.RowsAffected(0), nil +} + func (ci *copyin) Close() (err error) { if ci.closed { // Don't do anything, we're already closed return nil diff --git a/vendor/github.com/lib/pq/encode.go b/vendor/github.com/lib/pq/encode.go index 210b1ec3..bffe6096 100644 --- a/vendor/github.com/lib/pq/encode.go +++ b/vendor/github.com/lib/pq/encode.go @@ -422,7 +422,7 @@ func ParseTimestamp(currentLocation *time.Location, str string) (time.Time, erro if remainderIdx < len(str) && str[remainderIdx] == '.' { fracStart := remainderIdx + 1 - fracOff := strings.IndexAny(str[fracStart:], "-+ ") + fracOff := strings.IndexAny(str[fracStart:], "-+Z ") if fracOff < 0 { fracOff = len(str) - fracStart } @@ -432,7 +432,7 @@ func ParseTimestamp(currentLocation *time.Location, str string) (time.Time, erro remainderIdx += fracOff + 1 } if tzStart := remainderIdx; tzStart < len(str) && (str[tzStart] == '-' || str[tzStart] == '+') { - // time zone separator is always '-' or '+' (UTC is +00) + // time zone separator is always '-' or '+' or 'Z' (UTC is +00) var tzSign int switch c := str[tzStart]; c { case '-': @@ -454,7 +454,11 @@ func ParseTimestamp(currentLocation *time.Location, str string) (time.Time, erro remainderIdx += 3 } tzOff = tzSign * ((tzHours * 60 * 60) + (tzMin * 60) + tzSec) + } else if tzStart < len(str) && str[tzStart] == 'Z' { + // time zone Z separator indicates UTC is +00 + remainderIdx += 1 } + var isoYear int if isBC { diff --git a/vendor/github.com/lib/pq/error.go b/vendor/github.com/lib/pq/error.go index 5cfe9c6e..21b3d933 100644 --- a/vendor/github.com/lib/pq/error.go +++ b/vendor/github.com/lib/pq/error.go @@ -402,6 +402,11 @@ func (err *Error) Fatal() bool { return err.Severity == Efatal } +// SQLState returns the SQLState of the error. +func (err *Error) SQLState() string { + return string(err.Code) +} + // Get implements the legacy PGError interface. New code should use the fields // of the Error struct directly. 
func (err *Error) Get(k byte) (v string) { diff --git a/vendor/github.com/lib/pq/ssl_permissions.go b/vendor/github.com/lib/pq/ssl_permissions.go index 014af6a1..d587f102 100644 --- a/vendor/github.com/lib/pq/ssl_permissions.go +++ b/vendor/github.com/lib/pq/ssl_permissions.go @@ -3,7 +3,28 @@ package pq -import "os" +import ( + "errors" + "os" + "syscall" +) + +const ( + rootUserID = uint32(0) + + // The maximum permissions that a private key file owned by a regular user + // is allowed to have. This translates to u=rw. + maxUserOwnedKeyPermissions os.FileMode = 0600 + + // The maximum permissions that a private key file owned by root is allowed + // to have. This translates to u=rw,g=r. + maxRootOwnedKeyPermissions os.FileMode = 0640 +) + +var ( + errSSLKeyHasUnacceptableUserPermissions = errors.New("permissions for files not owned by root should be u=rw (0600) or less") + errSSLKeyHasUnacceptableRootPermissions = errors.New("permissions for root owned files should be u=rw,g=r (0640) or less") +) // sslKeyPermissions checks the permissions on user-supplied ssl key files. // The key file should have very little access. @@ -14,8 +35,59 @@ func sslKeyPermissions(sslkey string) error { if err != nil { return err } - if info.Mode().Perm()&0077 != 0 { - return ErrSSLKeyHasWorldPermissions + + err = hasCorrectPermissions(info) + + // return ErrSSLKeyHasWorldPermissions for backwards compatibility with + // existing code. + if err == errSSLKeyHasUnacceptableUserPermissions || err == errSSLKeyHasUnacceptableRootPermissions { + err = ErrSSLKeyHasWorldPermissions } - return nil + return err +} + +// hasCorrectPermissions checks the file info (and the unix-specific stat_t +// output) to verify that the permissions on the file are correct. +// +// If the file is owned by the same user the process is running as, +// the file should only have 0600 (u=rw). If the file is owned by root, +// and the group matches the group that the process is running in, the +// permissions cannot be more than 0640 (u=rw,g=r). The file should +// never have world permissions. +// +// Returns an error when the permission check fails. +func hasCorrectPermissions(info os.FileInfo) error { + // if the file's permission matches 0600, allow access. + userPermissionMask := (os.FileMode(0777) ^ maxUserOwnedKeyPermissions) + + // regardless of whether we're running as root, 0600 is acceptable, + // so we return if we match the regular user permission mask. + if info.Mode().Perm()&userPermissionMask == 0 { + return nil + } + + // We need to pull the Unix file information to get the file's owner. + // If we can't access it, there's some sort of operating system level error + // and we should fail rather than attempting to use faulty information. + sysInfo := info.Sys() + if sysInfo == nil { + return ErrSSLKeyUnknownOwnership + } + + unixStat, ok := sysInfo.(*syscall.Stat_t) + if !ok { + return ErrSSLKeyUnknownOwnership + } + + // if the file is owned by root, we allow 0640 (u=rw,g=r) to match what + // Postgres does.
+ if unixStat.Uid == rootUserID { + rootPermissionMask := (os.FileMode(0777) ^ maxRootOwnedKeyPermissions) + if info.Mode().Perm()&rootPermissionMask != 0 { + return errSSLKeyHasUnacceptableRootPermissions + } + return nil + } + + return errSSLKeyHasUnacceptableUserPermissions } diff --git a/vendor/github.com/mailru/easyjson/jlexer/lexer.go b/vendor/github.com/mailru/easyjson/jlexer/lexer.go index a42e9d65..b5f5e261 100644 --- a/vendor/github.com/mailru/easyjson/jlexer/lexer.go +++ b/vendor/github.com/mailru/easyjson/jlexer/lexer.go @@ -401,6 +401,7 @@ func (r *Lexer) scanToken() { // consume resets the current token to allow scanning the next one. func (r *Lexer) consume() { r.token.kind = tokenUndef + r.token.byteValueCloned = false r.token.delimValue = 0 } @@ -528,6 +529,7 @@ func (r *Lexer) Skip() { func (r *Lexer) SkipRecursive() { r.scanToken() var start, end byte + startPos := r.start switch r.token.delimValue { case '{': @@ -553,6 +555,14 @@ func (r *Lexer) SkipRecursive() { level-- if level == 0 { r.pos += i + 1 + if !json.Valid(r.Data[startPos:r.pos]) { + r.pos = len(r.Data) + r.fatalError = &LexerError{ + Reason: "skipped array/object json value is invalid", + Offset: r.pos, + Data: string(r.Data[r.pos:]), + } + } return } case c == '\\' && inQuotes: @@ -702,6 +712,10 @@ func (r *Lexer) Bytes() []byte { r.errInvalidToken("string") return nil } + if err := r.unescapeStringToken(); err != nil { + r.errInvalidToken("string") + return nil + } ret := make([]byte, base64.StdEncoding.DecodedLen(len(r.token.byteValue))) n, err := base64.StdEncoding.Decode(ret, r.token.byteValue) if err != nil { diff --git a/vendor/github.com/mattn/go-colorable/.travis.yml b/vendor/github.com/mattn/go-colorable/.travis.yml deleted file mode 100644 index 7942c565..00000000 --- a/vendor/github.com/mattn/go-colorable/.travis.yml +++ /dev/null @@ -1,15 +0,0 @@ -language: go -sudo: false -go: - - 1.13.x - - tip - -before_install: - - go get -t -v ./... 
- -script: - - ./go.test.sh - -after_success: - - bash <(curl -s https://codecov.io/bash) - diff --git a/vendor/github.com/mattn/go-colorable/README.md b/vendor/github.com/mattn/go-colorable/README.md index e055952b..ca048371 100644 --- a/vendor/github.com/mattn/go-colorable/README.md +++ b/vendor/github.com/mattn/go-colorable/README.md @@ -1,6 +1,6 @@ # go-colorable -[![Build Status](https://travis-ci.org/mattn/go-colorable.svg?branch=master)](https://travis-ci.org/mattn/go-colorable) +[![Build Status](https://github.com/mattn/go-colorable/workflows/test/badge.svg)](https://github.com/mattn/go-colorable/actions?query=workflow%3Atest) [![Codecov](https://codecov.io/gh/mattn/go-colorable/branch/master/graph/badge.svg)](https://codecov.io/gh/mattn/go-colorable) [![GoDoc](https://godoc.org/github.com/mattn/go-colorable?status.svg)](http://godoc.org/github.com/mattn/go-colorable) [![Go Report Card](https://goreportcard.com/badge/mattn/go-colorable)](https://goreportcard.com/report/mattn/go-colorable) diff --git a/vendor/github.com/mattn/go-colorable/colorable_appengine.go b/vendor/github.com/mattn/go-colorable/colorable_appengine.go index 1f7806fe..416d1bbb 100644 --- a/vendor/github.com/mattn/go-colorable/colorable_appengine.go +++ b/vendor/github.com/mattn/go-colorable/colorable_appengine.go @@ -1,3 +1,4 @@ +//go:build appengine // +build appengine package colorable diff --git a/vendor/github.com/mattn/go-colorable/colorable_others.go b/vendor/github.com/mattn/go-colorable/colorable_others.go index 08cbd1e0..766d9460 100644 --- a/vendor/github.com/mattn/go-colorable/colorable_others.go +++ b/vendor/github.com/mattn/go-colorable/colorable_others.go @@ -1,5 +1,5 @@ -// +build !windows -// +build !appengine +//go:build !windows && !appengine +// +build !windows,!appengine package colorable diff --git a/vendor/github.com/mattn/go-colorable/colorable_windows.go b/vendor/github.com/mattn/go-colorable/colorable_windows.go index 41215d7f..1846ad5a 100644 --- a/vendor/github.com/mattn/go-colorable/colorable_windows.go +++ b/vendor/github.com/mattn/go-colorable/colorable_windows.go @@ -1,5 +1,5 @@ -// +build windows -// +build !appengine +//go:build windows && !appengine +// +build windows,!appengine package colorable @@ -452,18 +452,22 @@ func (w *Writer) Write(data []byte) (n int, err error) { } else { er = bytes.NewReader(data) } - var bw [1]byte + var plaintext bytes.Buffer loop: for { c1, err := er.ReadByte() if err != nil { + plaintext.WriteTo(w.out) break loop } if c1 != 0x1b { - bw[0] = c1 - w.out.Write(bw[:]) + plaintext.WriteByte(c1) continue } + _, err = plaintext.WriteTo(w.out) + if err != nil { + break loop + } c2, err := er.ReadByte() if err != nil { break loop diff --git a/vendor/github.com/mattn/go-colorable/noncolorable.go b/vendor/github.com/mattn/go-colorable/noncolorable.go index 2dcb09aa..05d6f74b 100644 --- a/vendor/github.com/mattn/go-colorable/noncolorable.go +++ b/vendor/github.com/mattn/go-colorable/noncolorable.go @@ -18,21 +18,22 @@ func NewNonColorable(w io.Writer) io.Writer { // Write writes data on console func (w *NonColorable) Write(data []byte) (n int, err error) { er := bytes.NewReader(data) - var bw [1]byte + var plaintext bytes.Buffer loop: for { c1, err := er.ReadByte() if err != nil { + plaintext.WriteTo(w.out) break loop } if c1 != 0x1b { - bw[0] = c1 - _, err = w.out.Write(bw[:]) - if err != nil { - break loop - } + plaintext.WriteByte(c1) continue } + _, err = plaintext.WriteTo(w.out) + if err != nil { + break loop + } c2, err := er.ReadByte() if err != nil { 
break loop @@ -41,7 +42,6 @@ loop: continue } - var buf bytes.Buffer for { c, err := er.ReadByte() if err != nil { @@ -50,7 +50,6 @@ loop: if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' { break } - buf.Write([]byte(string(c))) } } diff --git a/vendor/github.com/montanaflynn/stats/.gitignore b/vendor/github.com/montanaflynn/stats/.gitignore new file mode 100644 index 00000000..e0a38e1c --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/.gitignore @@ -0,0 +1,5 @@ +coverage.out +release-notes.txt +.directory +.chglog +.vscode \ No newline at end of file diff --git a/vendor/github.com/montanaflynn/stats/.travis.yml b/vendor/github.com/montanaflynn/stats/.travis.yml new file mode 100644 index 00000000..28118fbf --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/.travis.yml @@ -0,0 +1,29 @@ +language: go +go: + - "1.7" + - "1.8" + - "1.9" + - "1.10" + - "1.11" + - "1.12" + - "1.13" + - "1.14" + - "1.15" + - "1.16" + - stable + - master +arch: + - amd64 + - arm64 +before_install: + - go get github.com/mattn/goveralls +script: + - go test -v -covermode=count -coverprofile=coverage.out +after_success: + - $GOPATH/bin/goveralls -coverprofile=coverage.out -service=travis-ci +notifications: + email: + recipients: + - montana@montanaflynn.me + on_success: change + on_failure: always diff --git a/vendor/github.com/montanaflynn/stats/CHANGELOG.md b/vendor/github.com/montanaflynn/stats/CHANGELOG.md new file mode 100644 index 00000000..ad842a54 --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/CHANGELOG.md @@ -0,0 +1,598 @@ + + +## [v0.6.6](https://github.com/montanaflynn/stats/compare/v0.6.5...v0.6.6) (2021-04-26) + +### Add + +* Add support for string and io.Reader in LoadRawData (pr [#68](https://github.com/montanaflynn/stats/issues/68)) +* Add latest versions of Go to test against + +### Use + +* Use math.Sqrt in StandardDeviation (PR [#64](https://github.com/montanaflynn/stats/issues/64)) + + + +## [v0.6.5](https://github.com/montanaflynn/stats/compare/v0.6.4...v0.6.5) (2021-02-21) + +### Add + +* Add Float64Data.Quartiles documentation + +### Update + +* Update changelog with v0.6.5 changes + + + +## [v0.6.4](https://github.com/montanaflynn/stats/compare/v0.6.3...v0.6.4) (2021-02-21) + +### Add + +* Add Quartiles method to Float64Data type (issue [#60](https://github.com/montanaflynn/stats/issues/60)) + +### Fix + +* Fix make release changelog command and add changelog history +* Fix failing tests due to precision errors on arm64 ([#58](https://github.com/montanaflynn/stats/issues/58)) + +### Update + +* Update changelog with v0.6.4 changes +* Update README.md links to CHANGELOG.md and DOCUMENTATION.md +* Update README.md and Makefile with new release commands +* Update changelog with v0.6.4 changes +* Update examples directory to include a README.md used for synopsis +* Update go.mod to include go version where modules are enabled by default +* Update changelog with v0.6.3 changes + + + +## [v0.6.3](https://github.com/montanaflynn/stats/compare/v0.6.2...v0.6.3) (2020-02-18) + +### Add + +* Add creating and committing changelog to Makefile release directive +* Add release-notes.txt and .chglog directory to .gitignore + +### Update + +* Update exported tests to use import for better example documentation +* Update documentation using godoc2md +* Update changelog with v0.6.2 release + + + +## [v0.6.2](https://github.com/montanaflynn/stats/compare/v0.6.1...v0.6.2) (2020-02-18) + +### Fix + +* Fix linting errcheck warnings in go benchmarks + +### Update + +* Update 
Makefile release directive to use correct release name + + + +## [v0.6.1](https://github.com/montanaflynn/stats/compare/v0.6.0...v0.6.1) (2020-02-18) + +### Add + +* Add StableSample function signature to readme + +### Fix + +* Fix linting warnings for normal distribution functions formatting and tests + +### Update + +* Update documentation links and rename DOC.md to DOCUMENTATION.md +* Update README with link to pkg.go.dev reference and release section +* Update Makefile with new changelog, docs, and release directives +* Update DOC.md links to GitHub source code +* Update doc.go comment and add DOC.md package reference file +* Update changelog using git-chglog + + + +## [v0.6.0](https://github.com/montanaflynn/stats/compare/v0.5.0...v0.6.0) (2020-02-17) + +### Add + +* Add Normal Distribution Functions ([#56](https://github.com/montanaflynn/stats/issues/56)) +* Add previous versions of Go to travis CI config +* Add check for distinct values in Mode function ([#51](https://github.com/montanaflynn/stats/issues/51)) +* Add StableSample function ([#48](https://github.com/montanaflynn/stats/issues/48)) +* Add doc.go file to show description and usage on godoc.org +* Add comments to new error and legacy error variables +* Add ExampleRound function to tests +* Add go.mod file for module support +* Add Sigmoid, SoftMax and Entropy methods and tests +* Add Entropy documentation, example and benchmarks +* Add Entropy function ([#44](https://github.com/montanaflynn/stats/issues/44)) + +### Fix + +* Fix percentile when only one element ([#47](https://github.com/montanaflynn/stats/issues/47)) +* Fix AutoCorrelation name in comments and remove unneeded Sprintf + +### Improve + +* Improve documentation section with command comments + +### Remove + +* Remove very old versions of Go in travis CI config +* Remove boolean comparison to get rid of gometalinter warning + +### Update + +* Update license dates +* Update Distance functions signatures to use Float64Data +* Update Sigmoid examples +* Update error names with backward compatibility + +### Use + +* Use relative link to examples/main.go +* Use a single var block for exported errors + + + +## [v0.5.0](https://github.com/montanaflynn/stats/compare/v0.4.0...v0.5.0) (2019-01-16) + +### Add + +* Add Sigmoid and Softmax functions + +### Fix + +* Fix syntax highlighting and add CumulativeSum func + + + +## [v0.4.0](https://github.com/montanaflynn/stats/compare/0.3.0...v0.4.0) (2019-01-14) + +### Add + +* Add goreport badge and documentation section to README.md +* Add Examples to test files +* Add AutoCorrelation and nist tests +* Add String method to statsErr type +* Add Y coordinate error for ExponentialRegression +* Add syntax highlighting ([#43](https://github.com/montanaflynn/stats/issues/43)) +* Add CumulativeSum ([#40](https://github.com/montanaflynn/stats/issues/40)) +* Add more tests and rename distance files +* Add coverage and benchmarks to azure pipeline +* Add go tests to azure pipeline + +### Change + +* Change travis tip alias to master +* Change codecov to coveralls for code coverage + +### Fix + +* Fix a few lint warnings +* Fix example error + +### Improve + +* Improve test coverage of distance functions + +### Only + +* Only run travis on stable and tip versions +* Only check code coverage on tip + +### Remove + +* Remove azure CI pipeline +* Remove unnecessary type conversions + +### Return + +* Return EmptyInputErr instead of EmptyInput + +### Set + +* Set up CI with Azure Pipelines + + + +## 
[0.3.0](https://github.com/montanaflynn/stats/compare/0.2.0...0.3.0) (2017-12-02) + +### Add + +* Add Chebyshev, Manhattan, Euclidean and Minkowski distance functions ([#35](https://github.com/montanaflynn/stats/issues/35)) +* Add function for computing chebyshev distance. ([#34](https://github.com/montanaflynn/stats/issues/34)) +* Add support for time.Duration +* Add LoadRawData to docs and examples +* Add unit test for edge case that wasn't covered +* Add unit tests for edge cases that weren't covered +* Add pearson alias delegating to correlation +* Add CovariancePopulation to Float64Data +* Add pearson product-moment correlation coefficient +* Add population covariance +* Add random slice benchmarks +* Add all applicable functions as methods to Float64Data type +* Add MIT license badge +* Add link to examples/methods.go +* Add Protips for usage and documentation sections +* Add tests for rounding up +* Add webdoc target and remove linting from test target +* Add example usage and consolidate contributing information + +### Added + +* Added MedianAbsoluteDeviation + +### Annotation + +* Annotation spelling error + +### Auto + +* auto commit +* auto commit + +### Calculate + +* Calculate correlation with sdev and covp + +### Clean + +* Clean up README.md and add info for offline docs + +### Consolidated + +* Consolidated all error values. + +### Fix + +* Fix Percentile logic +* Fix InterQuartileRange method test +* Fix zero percent bug and add test +* Fix usage example output typos + +### Improve + +* Improve bounds checking in Percentile +* Improve error log messaging + +### Imput + +* Imput -> Input + +### Include + +* Include alternative way to set Float64Data in example + +### Make + +* Make various changes to README.md + +### Merge + +* Merge branch 'master' of github.com:montanaflynn/stats +* Merge master + +### Mode + +* Mode calculation fix and tests + +### Realized + +* Realized the obvious efficiency gains of ignoring the unique numbers at the beginning of the slice. Benchmark joy ensued. + +### Refactor + +* Refactor testing of Round() +* Refactor setting Coordinate y field using Exp in place of Pow +* Refactor Makefile and add docs target + +### Remove + +* Remove deep links to types and functions + +### Rename + +* Rename file from types to data + +### Retrieve + +* Retrieve InterQuartileRange for the Float64Data. 
+ +### Split + +* Split up stats.go into separate files + +### Support + +* Support more types on LoadRawData() ([#36](https://github.com/montanaflynn/stats/issues/36)) + +### Switch + +* Switch default and check targets + +### Update + +* Update Readme +* Update example methods and some text +* Update README and include Float64Data type method examples + +### Pull Requests + +* Merge pull request [#32](https://github.com/montanaflynn/stats/issues/32) from a-robinson/percentile +* Merge pull request [#30](https://github.com/montanaflynn/stats/issues/30) from montanaflynn/fix-test +* Merge pull request [#29](https://github.com/montanaflynn/stats/issues/29) from edupsousa/master +* Merge pull request [#27](https://github.com/montanaflynn/stats/issues/27) from andrey-yantsen/fix-percentile-out-of-bounds +* Merge pull request [#25](https://github.com/montanaflynn/stats/issues/25) from kazhuravlev/patch-1 +* Merge pull request [#22](https://github.com/montanaflynn/stats/issues/22) from JanBerktold/time-duration +* Merge pull request [#24](https://github.com/montanaflynn/stats/issues/24) from alouche/master +* Merge pull request [#21](https://github.com/montanaflynn/stats/issues/21) from brydavis/master +* Merge pull request [#19](https://github.com/montanaflynn/stats/issues/19) from ginodeis/mode-bug +* Merge pull request [#17](https://github.com/montanaflynn/stats/issues/17) from Kunde21/master +* Merge pull request [#3](https://github.com/montanaflynn/stats/issues/3) from montanaflynn/master +* Merge pull request [#2](https://github.com/montanaflynn/stats/issues/2) from montanaflynn/master +* Merge pull request [#13](https://github.com/montanaflynn/stats/issues/13) from toashd/pearson +* Merge pull request [#12](https://github.com/montanaflynn/stats/issues/12) from alixaxel/MAD +* Merge pull request [#1](https://github.com/montanaflynn/stats/issues/1) from montanaflynn/master +* Merge pull request [#11](https://github.com/montanaflynn/stats/issues/11) from Kunde21/modeMemReduce +* Merge pull request [#10](https://github.com/montanaflynn/stats/issues/10) from Kunde21/ModeRewrite + + + +## [0.2.0](https://github.com/montanaflynn/stats/compare/0.1.0...0.2.0) (2015-10-14) + +### Add + +* Add Makefile with gometalinter, testing, benchmarking and coverage report targets +* Add comments describing functions and structs +* Add Correlation func +* Add Covariance func +* Add tests for new function shortcuts +* Add StandardDeviation function as a shortcut to StandardDeviationPopulation +* Add Float64Data and Series types + +### Change + +* Change Sample to return a standard []float64 type + +### Fix + +* Fix broken link to Makefile +* Fix broken link and simplify code coverage reporting command +* Fix go vet warning about printf type placeholder +* Fix failing codecov test coverage reporting +* Fix link to CHANGELOG.md + +### Fixed + +* Fixed typographical error, changed accomdate to accommodate in README. 
+ +### Include + +* Include Variance and StandardDeviation shortcuts + +### Pass + +* Pass gometalinter + +### Refactor + +* Refactor Variance function to be the same as population variance + +### Release + +* Release version 0.2.0 + +### Remove + +* Remove unneeded do packages and update cover URL +* Remove sudo from pip install + +### Reorder + +* Reorder functions and sections + +### Revert + +* Revert to legacy containers to preserve go1.1 testing + +### Switch + +* Switch from legacy to container-based CI infrastructure + +### Update + +* Update contributing instructions and mention Makefile + +### Pull Requests + +* Merge pull request [#5](https://github.com/montanaflynn/stats/issues/5) from orthographic-pedant/spell_check/accommodate + + + +## [0.1.0](https://github.com/montanaflynn/stats/compare/0.0.9...0.1.0) (2015-08-19) + +### Add + +* Add CONTRIBUTING.md + +### Rename + +* Rename functions while preserving backwards compatibility + + + +## 0.0.9 (2015-08-18) + +### Add + +* Add HarmonicMean func +* Add GeometricMean func +* Add .gitignore to avoid commiting test coverage report +* Add Outliers stuct and QuantileOutliers func +* Add Interquartile Range, Midhinge and Trimean examples +* Add Trimean +* Add Midhinge +* Add Inter Quartile Range +* Add a unit test to check for an empty slice error +* Add Quantiles struct and Quantile func +* Add more tests and fix a typo +* Add Golang 1.5 to build tests +* Add a standard MIT license file +* Add basic benchmarking +* Add regression models +* Add codecov token +* Add codecov +* Add check for slices with a single item +* Add coverage tests +* Add back previous Go versions to Travis CI +* Add Travis CI +* Add GoDoc badge +* Add Percentile and Float64ToInt functions +* Add another rounding test for whole numbers +* Add build status badge +* Add code coverage badge +* Add test for NaN, achieving 100% code coverage +* Add round function +* Add standard deviation function +* Add sum function + +### Add + +* add tests for sample +* add sample + +### Added + +* Added sample and population variance and deviation functions +* Added README + +### Adjust + +* Adjust API ordering + +### Avoid + +* Avoid unintended consequence of using sort + +### Better + +* Better performing min/max +* Better description + +### Change + +* Change package path to potentially fix a bug in earlier versions of Go + +### Clean + +* Clean up README and add some more information +* Clean up test error + +### Consistent + +* Consistent empty slice error messages +* Consistent var naming +* Consistent func declaration + +### Convert + +* Convert ints to floats + +### Duplicate + +* Duplicate packages for all versions + +### Export + +* Export Coordinate struct fields + +### First + +* First commit + +### Fix + +* Fix copy pasta mistake testing the wrong function +* Fix error message +* Fix usage output and edit API doc section +* Fix testing edgecase where map was in wrong order +* Fix usage example +* Fix usage examples + +### Include + +* Include the Nearest Rank method of calculating percentiles + +### More + +* More commenting + +### Move + +* Move GoDoc link to top + +### Redirect + +* Redirect kills newer versions of Go + +### Refactor + +* Refactor code and error checking + +### Remove + +* Remove unnecassary typecasting in sum func +* Remove cover since it doesn't work for later versions of go +* Remove golint and gocoveralls + +### Rename + +* Rename StandardDev to StdDev +* Rename StandardDev to StdDev + +### Return + +* Return errors for all functions + +### Run 
+ +* Run go fmt to clean up formatting + +### Simplify + +* Simplify min/max function + +### Start + +* Start with minimal tests + +### Switch + +* Switch wercker to travis and update todos + +### Table + +* table testing style + +### Update + +* Update README and move the example main.go into it's own file +* Update TODO list +* Update README +* Update usage examples and todos + +### Use + +* Use codecov the recommended way +* Use correct string formatting types + +### Pull Requests + +* Merge pull request [#4](https://github.com/montanaflynn/stats/issues/4) from saromanov/sample + diff --git a/vendor/github.com/montanaflynn/stats/DOCUMENTATION.md b/vendor/github.com/montanaflynn/stats/DOCUMENTATION.md new file mode 100644 index 00000000..b5678894 --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/DOCUMENTATION.md @@ -0,0 +1,1237 @@ + + +# stats +`import "github.com/montanaflynn/stats"` + +* [Overview](#pkg-overview) +* [Index](#pkg-index) +* [Examples](#pkg-examples) +* [Subdirectories](#pkg-subdirectories) + +## Overview +Package stats is a well tested and comprehensive +statistics library package with no dependencies. + +Example Usage: + + + // start with some source data to use + data := []float64{1.0, 2.1, 3.2, 4.823, 4.1, 5.8} + + // you could also use different types like this + // data := stats.LoadRawData([]int{1, 2, 3, 4, 5}) + // data := stats.LoadRawData([]interface{}{1.1, "2", 3}) + // etc... + + median, _ := stats.Median(data) + fmt.Println(median) // 3.65 + + roundedMedian, _ := stats.Round(median, 0) + fmt.Println(roundedMedian) // 4 + +MIT License Copyright (c) 2014-2020 Montana Flynn (https://montanaflynn.com) + + + + +## Index +* [Variables](#pkg-variables) +* [func AutoCorrelation(data Float64Data, lags int) (float64, error)](#AutoCorrelation) +* [func ChebyshevDistance(dataPointX, dataPointY Float64Data) (distance float64, err error)](#ChebyshevDistance) +* [func Correlation(data1, data2 Float64Data) (float64, error)](#Correlation) +* [func Covariance(data1, data2 Float64Data) (float64, error)](#Covariance) +* [func CovariancePopulation(data1, data2 Float64Data) (float64, error)](#CovariancePopulation) +* [func CumulativeSum(input Float64Data) ([]float64, error)](#CumulativeSum) +* [func Entropy(input Float64Data) (float64, error)](#Entropy) +* [func EuclideanDistance(dataPointX, dataPointY Float64Data) (distance float64, err error)](#EuclideanDistance) +* [func GeometricMean(input Float64Data) (float64, error)](#GeometricMean) +* [func HarmonicMean(input Float64Data) (float64, error)](#HarmonicMean) +* [func InterQuartileRange(input Float64Data) (float64, error)](#InterQuartileRange) +* [func ManhattanDistance(dataPointX, dataPointY Float64Data) (distance float64, err error)](#ManhattanDistance) +* [func Max(input Float64Data) (max float64, err error)](#Max) +* [func Mean(input Float64Data) (float64, error)](#Mean) +* [func Median(input Float64Data) (median float64, err error)](#Median) +* [func MedianAbsoluteDeviation(input Float64Data) (mad float64, err error)](#MedianAbsoluteDeviation) +* [func MedianAbsoluteDeviationPopulation(input Float64Data) (mad float64, err error)](#MedianAbsoluteDeviationPopulation) +* [func Midhinge(input Float64Data) (float64, error)](#Midhinge) +* [func Min(input Float64Data) (min float64, err error)](#Min) +* [func MinkowskiDistance(dataPointX, dataPointY Float64Data, lambda float64) (distance float64, err error)](#MinkowskiDistance) +* [func Mode(input Float64Data) (mode []float64, err error)](#Mode) +* [func Ncr(n, r int) 
int](#Ncr) +* [func NormBoxMullerRvs(loc float64, scale float64, size int) []float64](#NormBoxMullerRvs) +* [func NormCdf(x float64, loc float64, scale float64) float64](#NormCdf) +* [func NormEntropy(loc float64, scale float64) float64](#NormEntropy) +* [func NormFit(data []float64) [2]float64](#NormFit) +* [func NormInterval(alpha float64, loc float64, scale float64) [2]float64](#NormInterval) +* [func NormIsf(p float64, loc float64, scale float64) (x float64)](#NormIsf) +* [func NormLogCdf(x float64, loc float64, scale float64) float64](#NormLogCdf) +* [func NormLogPdf(x float64, loc float64, scale float64) float64](#NormLogPdf) +* [func NormLogSf(x float64, loc float64, scale float64) float64](#NormLogSf) +* [func NormMean(loc float64, scale float64) float64](#NormMean) +* [func NormMedian(loc float64, scale float64) float64](#NormMedian) +* [func NormMoment(n int, loc float64, scale float64) float64](#NormMoment) +* [func NormPdf(x float64, loc float64, scale float64) float64](#NormPdf) +* [func NormPpf(p float64, loc float64, scale float64) (x float64)](#NormPpf) +* [func NormPpfRvs(loc float64, scale float64, size int) []float64](#NormPpfRvs) +* [func NormSf(x float64, loc float64, scale float64) float64](#NormSf) +* [func NormStats(loc float64, scale float64, moments string) []float64](#NormStats) +* [func NormStd(loc float64, scale float64) float64](#NormStd) +* [func NormVar(loc float64, scale float64) float64](#NormVar) +* [func Pearson(data1, data2 Float64Data) (float64, error)](#Pearson) +* [func Percentile(input Float64Data, percent float64) (percentile float64, err error)](#Percentile) +* [func PercentileNearestRank(input Float64Data, percent float64) (percentile float64, err error)](#PercentileNearestRank) +* [func PopulationVariance(input Float64Data) (pvar float64, err error)](#PopulationVariance) +* [func Round(input float64, places int) (rounded float64, err error)](#Round) +* [func Sample(input Float64Data, takenum int, replacement bool) ([]float64, error)](#Sample) +* [func SampleVariance(input Float64Data) (svar float64, err error)](#SampleVariance) +* [func Sigmoid(input Float64Data) ([]float64, error)](#Sigmoid) +* [func SoftMax(input Float64Data) ([]float64, error)](#SoftMax) +* [func StableSample(input Float64Data, takenum int) ([]float64, error)](#StableSample) +* [func StandardDeviation(input Float64Data) (sdev float64, err error)](#StandardDeviation) +* [func StandardDeviationPopulation(input Float64Data) (sdev float64, err error)](#StandardDeviationPopulation) +* [func StandardDeviationSample(input Float64Data) (sdev float64, err error)](#StandardDeviationSample) +* [func StdDevP(input Float64Data) (sdev float64, err error)](#StdDevP) +* [func StdDevS(input Float64Data) (sdev float64, err error)](#StdDevS) +* [func Sum(input Float64Data) (sum float64, err error)](#Sum) +* [func Trimean(input Float64Data) (float64, error)](#Trimean) +* [func VarP(input Float64Data) (sdev float64, err error)](#VarP) +* [func VarS(input Float64Data) (sdev float64, err error)](#VarS) +* [func Variance(input Float64Data) (sdev float64, err error)](#Variance) +* [type Coordinate](#Coordinate) + * [func ExpReg(s []Coordinate) (regressions []Coordinate, err error)](#ExpReg) + * [func LinReg(s []Coordinate) (regressions []Coordinate, err error)](#LinReg) + * [func LogReg(s []Coordinate) (regressions []Coordinate, err error)](#LogReg) +* [type Float64Data](#Float64Data) + * [func LoadRawData(raw interface{}) (f Float64Data)](#LoadRawData) + * [func (f Float64Data) AutoCorrelation(lags 
int) (float64, error)](#Float64Data.AutoCorrelation) + * [func (f Float64Data) Correlation(d Float64Data) (float64, error)](#Float64Data.Correlation) + * [func (f Float64Data) Covariance(d Float64Data) (float64, error)](#Float64Data.Covariance) + * [func (f Float64Data) CovariancePopulation(d Float64Data) (float64, error)](#Float64Data.CovariancePopulation) + * [func (f Float64Data) CumulativeSum() ([]float64, error)](#Float64Data.CumulativeSum) + * [func (f Float64Data) Entropy() (float64, error)](#Float64Data.Entropy) + * [func (f Float64Data) GeometricMean() (float64, error)](#Float64Data.GeometricMean) + * [func (f Float64Data) Get(i int) float64](#Float64Data.Get) + * [func (f Float64Data) HarmonicMean() (float64, error)](#Float64Data.HarmonicMean) + * [func (f Float64Data) InterQuartileRange() (float64, error)](#Float64Data.InterQuartileRange) + * [func (f Float64Data) Len() int](#Float64Data.Len) + * [func (f Float64Data) Less(i, j int) bool](#Float64Data.Less) + * [func (f Float64Data) Max() (float64, error)](#Float64Data.Max) + * [func (f Float64Data) Mean() (float64, error)](#Float64Data.Mean) + * [func (f Float64Data) Median() (float64, error)](#Float64Data.Median) + * [func (f Float64Data) MedianAbsoluteDeviation() (float64, error)](#Float64Data.MedianAbsoluteDeviation) + * [func (f Float64Data) MedianAbsoluteDeviationPopulation() (float64, error)](#Float64Data.MedianAbsoluteDeviationPopulation) + * [func (f Float64Data) Midhinge(d Float64Data) (float64, error)](#Float64Data.Midhinge) + * [func (f Float64Data) Min() (float64, error)](#Float64Data.Min) + * [func (f Float64Data) Mode() ([]float64, error)](#Float64Data.Mode) + * [func (f Float64Data) Pearson(d Float64Data) (float64, error)](#Float64Data.Pearson) + * [func (f Float64Data) Percentile(p float64) (float64, error)](#Float64Data.Percentile) + * [func (f Float64Data) PercentileNearestRank(p float64) (float64, error)](#Float64Data.PercentileNearestRank) + * [func (f Float64Data) PopulationVariance() (float64, error)](#Float64Data.PopulationVariance) + * [func (f Float64Data) Quartile(d Float64Data) (Quartiles, error)](#Float64Data.Quartile) + * [func (f Float64Data) QuartileOutliers() (Outliers, error)](#Float64Data.QuartileOutliers) + * [func (f Float64Data) Quartiles() (Quartiles, error)](#Float64Data.Quartiles) + * [func (f Float64Data) Sample(n int, r bool) ([]float64, error)](#Float64Data.Sample) + * [func (f Float64Data) SampleVariance() (float64, error)](#Float64Data.SampleVariance) + * [func (f Float64Data) Sigmoid() ([]float64, error)](#Float64Data.Sigmoid) + * [func (f Float64Data) SoftMax() ([]float64, error)](#Float64Data.SoftMax) + * [func (f Float64Data) StandardDeviation() (float64, error)](#Float64Data.StandardDeviation) + * [func (f Float64Data) StandardDeviationPopulation() (float64, error)](#Float64Data.StandardDeviationPopulation) + * [func (f Float64Data) StandardDeviationSample() (float64, error)](#Float64Data.StandardDeviationSample) + * [func (f Float64Data) Sum() (float64, error)](#Float64Data.Sum) + * [func (f Float64Data) Swap(i, j int)](#Float64Data.Swap) + * [func (f Float64Data) Trimean(d Float64Data) (float64, error)](#Float64Data.Trimean) + * [func (f Float64Data) Variance() (float64, error)](#Float64Data.Variance) +* [type Outliers](#Outliers) + * [func QuartileOutliers(input Float64Data) (Outliers, error)](#QuartileOutliers) +* [type Quartiles](#Quartiles) + * [func Quartile(input Float64Data) (Quartiles, error)](#Quartile) +* [type Series](#Series) + * [func ExponentialRegression(s 
Series) (regressions Series, err error)](#ExponentialRegression) + * [func LinearRegression(s Series) (regressions Series, err error)](#LinearRegression) + * [func LogarithmicRegression(s Series) (regressions Series, err error)](#LogarithmicRegression) + +#### Examples +* [AutoCorrelation](#example_AutoCorrelation) +* [ChebyshevDistance](#example_ChebyshevDistance) +* [Correlation](#example_Correlation) +* [CumulativeSum](#example_CumulativeSum) +* [Entropy](#example_Entropy) +* [LinearRegression](#example_LinearRegression) +* [LoadRawData](#example_LoadRawData) +* [Max](#example_Max) +* [Median](#example_Median) +* [Min](#example_Min) +* [Round](#example_Round) +* [Sigmoid](#example_Sigmoid) +* [SoftMax](#example_SoftMax) +* [Sum](#example_Sum) + +#### Package files +[correlation.go](/src/github.com/montanaflynn/stats/correlation.go) [cumulative_sum.go](/src/github.com/montanaflynn/stats/cumulative_sum.go) [data.go](/src/github.com/montanaflynn/stats/data.go) [deviation.go](/src/github.com/montanaflynn/stats/deviation.go) [distances.go](/src/github.com/montanaflynn/stats/distances.go) [doc.go](/src/github.com/montanaflynn/stats/doc.go) [entropy.go](/src/github.com/montanaflynn/stats/entropy.go) [errors.go](/src/github.com/montanaflynn/stats/errors.go) [legacy.go](/src/github.com/montanaflynn/stats/legacy.go) [load.go](/src/github.com/montanaflynn/stats/load.go) [max.go](/src/github.com/montanaflynn/stats/max.go) [mean.go](/src/github.com/montanaflynn/stats/mean.go) [median.go](/src/github.com/montanaflynn/stats/median.go) [min.go](/src/github.com/montanaflynn/stats/min.go) [mode.go](/src/github.com/montanaflynn/stats/mode.go) [norm.go](/src/github.com/montanaflynn/stats/norm.go) [outlier.go](/src/github.com/montanaflynn/stats/outlier.go) [percentile.go](/src/github.com/montanaflynn/stats/percentile.go) [quartile.go](/src/github.com/montanaflynn/stats/quartile.go) [ranksum.go](/src/github.com/montanaflynn/stats/ranksum.go) [regression.go](/src/github.com/montanaflynn/stats/regression.go) [round.go](/src/github.com/montanaflynn/stats/round.go) [sample.go](/src/github.com/montanaflynn/stats/sample.go) [sigmoid.go](/src/github.com/montanaflynn/stats/sigmoid.go) [softmax.go](/src/github.com/montanaflynn/stats/softmax.go) [sum.go](/src/github.com/montanaflynn/stats/sum.go) [util.go](/src/github.com/montanaflynn/stats/util.go) [variance.go](/src/github.com/montanaflynn/stats/variance.go) + + + +## Variables +``` go +var ( + // ErrEmptyInput Input must not be empty + ErrEmptyInput = statsError{"Input must not be empty."} + // ErrNaN Not a number + ErrNaN = statsError{"Not a number."} + // ErrNegative Must not contain negative values + ErrNegative = statsError{"Must not contain negative values."} + // ErrZero Must not contain zero values + ErrZero = statsError{"Must not contain zero values."} + // ErrBounds Input is outside of range + ErrBounds = statsError{"Input is outside of range."} + // ErrSize Must be the same length + ErrSize = statsError{"Must be the same length."} + // ErrInfValue Value is infinite + ErrInfValue = statsError{"Value is infinite."} + // ErrYCoord Y Value must be greater than zero + ErrYCoord = statsError{"Y Value must be greater than zero."} +) +``` +These are the package-wide error values. +All error identification should use these values. 
+https://github.com/golang/go/wiki/Errors#naming + +``` go +var ( + EmptyInputErr = ErrEmptyInput + NaNErr = ErrNaN + NegativeErr = ErrNegative + ZeroErr = ErrZero + BoundsErr = ErrBounds + SizeErr = ErrSize + InfValue = ErrInfValue + YCoordErr = ErrYCoord + EmptyInput = ErrEmptyInput +) +``` +Legacy error names that didn't start with Err + + + +## func [AutoCorrelation](/correlation.go?s=853:918#L38) +``` go +func AutoCorrelation(data Float64Data, lags int) (float64, error) +``` +AutoCorrelation is the correlation of a signal with a delayed copy of itself as a function of delay + + + +## func [ChebyshevDistance](/distances.go?s=368:456#L20) +``` go +func ChebyshevDistance(dataPointX, dataPointY Float64Data) (distance float64, err error) +``` +ChebyshevDistance computes the Chebyshev distance between two data sets + + + +## func [Correlation](/correlation.go?s=112:171#L8) +``` go +func Correlation(data1, data2 Float64Data) (float64, error) +``` +Correlation describes the degree of relationship between two sets of data + + + +## func [Covariance](/variance.go?s=1284:1342#L53) +``` go +func Covariance(data1, data2 Float64Data) (float64, error) +``` +Covariance is a measure of how much two sets of data change + + + +## func [CovariancePopulation](/variance.go?s=1864:1932#L81) +``` go +func CovariancePopulation(data1, data2 Float64Data) (float64, error) +``` +CovariancePopulation computes covariance for entire population between two variables. + + + +## func [CumulativeSum](/cumulative_sum.go?s=81:137#L4) +``` go +func CumulativeSum(input Float64Data) ([]float64, error) +``` +CumulativeSum calculates the cumulative sum of the input slice + + + +## func [Entropy](/entropy.go?s=77:125#L6) +``` go +func Entropy(input Float64Data) (float64, error) +``` +Entropy provides calculation of the entropy + + + +## func [EuclideanDistance](/distances.go?s=836:924#L36) +``` go +func EuclideanDistance(dataPointX, dataPointY Float64Data) (distance float64, err error) +``` +EuclideanDistance computes the Euclidean distance between two data sets + + + +## func [GeometricMean](/mean.go?s=319:373#L18) +``` go +func GeometricMean(input Float64Data) (float64, error) +``` +GeometricMean gets the geometric mean for a slice of numbers + + + +## func [HarmonicMean](/mean.go?s=717:770#L40) +``` go +func HarmonicMean(input Float64Data) (float64, error) +``` +HarmonicMean gets the harmonic mean for a slice of numbers + + + +## func [InterQuartileRange](/quartile.go?s=821:880#L45) +``` go +func InterQuartileRange(input Float64Data) (float64, error) +``` +InterQuartileRange finds the range between Q1 and Q3 + + + +## func [ManhattanDistance](/distances.go?s=1277:1365#L50) +``` go +func ManhattanDistance(dataPointX, dataPointY Float64Data) (distance float64, err error) +``` +ManhattanDistance computes the Manhattan distance between two data sets + + + +## func [Max](/max.go?s=78:130#L8) +``` go +func Max(input Float64Data) (max float64, err error) +``` +Max finds the highest number in a slice + + + +## func [Mean](/mean.go?s=77:122#L6) +``` go +func Mean(input Float64Data) (float64, error) +``` +Mean gets the average of a slice of numbers + + + +## func [Median](/median.go?s=85:143#L6) +``` go +func Median(input Float64Data) (median float64, err error) +``` +Median gets the median number in a slice of numbers + + + +## func [MedianAbsoluteDeviation](/deviation.go?s=125:197#L6) +``` go +func MedianAbsoluteDeviation(input Float64Data) (mad float64, err error) +``` +MedianAbsoluteDeviation finds the median of the absolute 
deviations from the dataset median + + + +## func [MedianAbsoluteDeviationPopulation](/deviation.go?s=360:442#L11) +``` go +func MedianAbsoluteDeviationPopulation(input Float64Data) (mad float64, err error) +``` +MedianAbsoluteDeviationPopulation finds the median of the absolute deviations from the population median + + + +## func [Midhinge](/quartile.go?s=1075:1124#L55) +``` go +func Midhinge(input Float64Data) (float64, error) +``` +Midhinge finds the average of the first and third quartiles + + + +## func [Min](/min.go?s=78:130#L6) +``` go +func Min(input Float64Data) (min float64, err error) +``` +Min finds the lowest number in a set of data + + + +## func [MinkowskiDistance](/distances.go?s=2152:2256#L75) +``` go +func MinkowskiDistance(dataPointX, dataPointY Float64Data, lambda float64) (distance float64, err error) +``` +MinkowskiDistance computes the Minkowski distance between two data sets + +Arguments: + + + dataPointX: First set of data points + dataPointY: Second set of data points. Length of both data + sets must be equal. + lambda: aka p or city blocks; with lambda = 1 the + returned distance is the Manhattan distance, and with + lambda = 2 it is the Euclidean distance. As lambda + approaches infinity, the distance approaches the + Chebyshev distance. + +Return: + + + Distance or error + + + +## func [Mode](/mode.go?s=85:141#L4) +``` go +func Mode(input Float64Data) (mode []float64, err error) +``` +Mode gets the mode [most frequent value(s)] of a slice of float64s + + + +## func [Ncr](/norm.go?s=7384:7406#L239) +``` go +func Ncr(n, r int) int +``` +Ncr is an N choose R algorithm. +Aaron Cannon's algorithm. + + + +## func [NormBoxMullerRvs](/norm.go?s=667:736#L23) +``` go +func NormBoxMullerRvs(loc float64, scale float64, size int) []float64 +``` +NormBoxMullerRvs generates random variates using the Box–Muller transform. +For more information please visit: http://mathworld.wolfram.com/Box-MullerTransformation.html + + + +## func [NormCdf](/norm.go?s=1826:1885#L52) +``` go +func NormCdf(x float64, loc float64, scale float64) float64 +``` +NormCdf is the cumulative distribution function. + + + +## func [NormEntropy](/norm.go?s=5773:5825#L180) +``` go +func NormEntropy(loc float64, scale float64) float64 +``` +NormEntropy is the differential entropy of the RV. + + + +## func [NormFit](/norm.go?s=6058:6097#L187) +``` go +func NormFit(data []float64) [2]float64 +``` +NormFit returns the maximum likelihood estimators for the Normal Distribution. +Takes array of float64 values. +Returns array of Mean followed by Standard Deviation. + + + +## func [NormInterval](/norm.go?s=6976:7047#L221) +``` go +func NormInterval(alpha float64, loc float64, scale float64) [2]float64 +``` +NormInterval finds endpoints of the range that contains alpha percent of the distribution. + + + +## func [NormIsf](/norm.go?s=4330:4393#L137) +``` go +func NormIsf(p float64, loc float64, scale float64) (x float64) +``` +NormIsf is the inverse survival function (inverse of sf). + + + +## func [NormLogCdf](/norm.go?s=2016:2078#L57) +``` go +func NormLogCdf(x float64, loc float64, scale float64) float64 +``` +NormLogCdf is the log of the cumulative distribution function. + + + +## func [NormLogPdf](/norm.go?s=1590:1652#L47) +``` go +func NormLogPdf(x float64, loc float64, scale float64) float64 +``` +NormLogPdf is the log of the probability density function. + + + +## func [NormLogSf](/norm.go?s=2423:2484#L67) +``` go +func NormLogSf(x float64, loc float64, scale float64) float64 +``` +NormLogSf is the log of the survival function.
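+For illustration, a minimal sketch combining NormBoxMullerRvs and NormFit from above (the draw is random, so the fitted estimates vary per run):
+
+``` go
+package main
+
+import (
+	"fmt"
+
+	"github.com/montanaflynn/stats"
+)
+
+func main() {
+	// Draw 10,000 variates from N(loc=5, scale=2) via the Box-Muller transform.
+	data := stats.NormBoxMullerRvs(5.0, 2.0, 10000)
+
+	// NormFit returns the maximum likelihood estimators as [mean, stddev],
+	// which should land close to the loc and scale used above.
+	est := stats.NormFit(data)
+	fmt.Printf("mean=%.2f stddev=%.2f\n", est[0], est[1])
+}
+```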
+ + + +## func [NormMean](/norm.go?s=6560:6609#L206) +``` go +func NormMean(loc float64, scale float64) float64 +``` +NormMean is the mean/expected value of the distribution. + + + +## func [NormMedian](/norm.go?s=6431:6482#L201) +``` go +func NormMedian(loc float64, scale float64) float64 +``` +NormMedian is the median of the distribution. + + + +## func [NormMoment](/norm.go?s=4694:4752#L146) +``` go +func NormMoment(n int, loc float64, scale float64) float64 +``` +NormMoment approximates the non-central (raw) moment of order n. +For more information please visit: https://math.stackexchange.com/questions/1945448/methods-for-finding-raw-moments-of-the-normal-distribution + + + +## func [NormPdf](/norm.go?s=1357:1416#L42) +``` go +func NormPdf(x float64, loc float64, scale float64) float64 +``` +NormPdf is the probability density function. + + + +## func [NormPpf](/norm.go?s=2854:2917#L75) +``` go +func NormPpf(p float64, loc float64, scale float64) (x float64) +``` +NormPpf is the point percentile function. +This is based on Peter John Acklam's inverse normal CDF. +algorithm: http://home.online.no/~pjacklam/notes/invnorm/ (no longer visible). +For more information please visit: https://stackedboxes.org/2017/05/01/acklams-normal-quantile-function/ + + + +## func [NormPpfRvs](/norm.go?s=247:310#L12) +``` go +func NormPpfRvs(loc float64, scale float64, size int) []float64 +``` +NormPpfRvs generates random variates using the Point Percentile Function. +For more information please visit: https://demonstrations.wolfram.com/TheMethodOfInverseTransforms/ + + + +## func [NormSf](/norm.go?s=2250:2308#L62) +``` go +func NormSf(x float64, loc float64, scale float64) float64 +``` +NormSf is the survival function (also defined as 1 - cdf, but sf is sometimes more accurate). + + + +## func [NormStats](/norm.go?s=5277:5345#L162) +``` go +func NormStats(loc float64, scale float64, moments string) []float64 +``` +NormStats returns the mean, variance, skew, and/or kurtosis. +Mean(‘m’), variance(‘v’), skew(‘s’), and/or kurtosis(‘k’). +Takes string containing any of 'mvsk'. +Returns array of m v s k in that order. + + + +## func [NormStd](/norm.go?s=6814:6862#L216) +``` go +func NormStd(loc float64, scale float64) float64 +``` +NormStd is the standard deviation of the distribution. + + + +## func [NormVar](/norm.go?s=6675:6723#L211) +``` go +func NormVar(loc float64, scale float64) float64 +``` +NormVar is the variance of the distribution. 
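+As a small usage sketch tying together the quantile and CDF helpers above (values shown are approximate):
+
+``` go
+package main
+
+import (
+	"fmt"
+
+	"github.com/montanaflynn/stats"
+)
+
+func main() {
+	loc, scale := 0.0, 1.0 // standard normal
+
+	// NormPpf inverts NormCdf, so running a probability through both
+	// should return (approximately) the starting probability.
+	x := stats.NormPpf(0.975, loc, scale) // roughly 1.96
+	p := stats.NormCdf(x, loc, scale)     // roughly 0.975
+
+	fmt.Printf("x=%.4f p=%.4f\n", x, p)
+}
+```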
+ + + +## func [Pearson](/correlation.go?s=655:710#L33) +``` go +func Pearson(data1, data2 Float64Data) (float64, error) +``` +Pearson calculates the Pearson product-moment correlation coefficient between two variables + + + +## func [Percentile](/percentile.go?s=98:181#L8) +``` go +func Percentile(input Float64Data, percent float64) (percentile float64, err error) +``` +Percentile finds the relative standing in a slice of floats + + + +## func [PercentileNearestRank](/percentile.go?s=1079:1173#L54) +``` go +func PercentileNearestRank(input Float64Data, percent float64) (percentile float64, err error) +``` +PercentileNearestRank finds the relative standing in a slice of floats using the Nearest Rank method + + + +## func [PopulationVariance](/variance.go?s=828:896#L31) +``` go +func PopulationVariance(input Float64Data) (pvar float64, err error) +``` +PopulationVariance finds the amount of variance within a population + + + +## func [Round](/round.go?s=88:154#L6) +``` go +func Round(input float64, places int) (rounded float64, err error) +``` +Round a float to a specific decimal place or precision + + + +## func [Sample](/sample.go?s=112:192#L9) +``` go +func Sample(input Float64Data, takenum int, replacement bool) ([]float64, error) +``` +Sample returns sample from input with replacement or without + + + +## func [SampleVariance](/variance.go?s=1058:1122#L42) +``` go +func SampleVariance(input Float64Data) (svar float64, err error) +``` +SampleVariance finds the amount of variance within a sample + + + +## func [Sigmoid](/sigmoid.go?s=228:278#L9) +``` go +func Sigmoid(input Float64Data) ([]float64, error) +``` +Sigmoid returns the input values in the range of 0 to 1 +along the sigmoid or s-shaped curve, commonly used in +machine learning while training neural networks as an +activation function. + + + +## func [SoftMax](/softmax.go?s=206:256#L8) +``` go +func SoftMax(input Float64Data) ([]float64, error) +``` +SoftMax returns the input values in the range of 0 to 1 +with sum of all the probabilities being equal to one. It +is commonly used in machine learning neural networks. + + + +## func [StableSample](/sample.go?s=974:1042#L50) +``` go +func StableSample(input Float64Data, takenum int) ([]float64, error) +``` +StableSample, like stable sort, returns samples from the input while keeping the order of the original data.
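+To make the difference between the two sampling functions above concrete, a minimal sketch (the random draw differs per run; errors are elided as in the overview example):
+
+``` go
+package main
+
+import (
+	"fmt"
+
+	"github.com/montanaflynn/stats"
+)
+
+func main() {
+	data := stats.Float64Data{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
+
+	// Sample draws 3 values without replacement, in arbitrary order.
+	random, _ := stats.Sample(data, 3, false)
+
+	// StableSample also draws 3 values, but preserves their original order.
+	stable, _ := stats.StableSample(data, 3)
+
+	fmt.Println(random, stable)
+}
+```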
+ + + +## func [StandardDeviation](/deviation.go?s=695:762#L27) +``` go +func StandardDeviation(input Float64Data) (sdev float64, err error) +``` +StandardDeviation the amount of variation in the dataset + + + +## func [StandardDeviationPopulation](/deviation.go?s=892:969#L32) +``` go +func StandardDeviationPopulation(input Float64Data) (sdev float64, err error) +``` +StandardDeviationPopulation finds the amount of variation from the population + + + +## func [StandardDeviationSample](/deviation.go?s=1254:1327#L46) +``` go +func StandardDeviationSample(input Float64Data) (sdev float64, err error) +``` +StandardDeviationSample finds the amount of variation from a sample + + + +## func [StdDevP](/legacy.go?s=339:396#L14) +``` go +func StdDevP(input Float64Data) (sdev float64, err error) +``` +StdDevP is a shortcut to StandardDeviationPopulation + + + +## func [StdDevS](/legacy.go?s=497:554#L19) +``` go +func StdDevS(input Float64Data) (sdev float64, err error) +``` +StdDevS is a shortcut to StandardDeviationSample + + + +## func [Sum](/sum.go?s=78:130#L6) +``` go +func Sum(input Float64Data) (sum float64, err error) +``` +Sum adds all the numbers of a slice together + + + +## func [Trimean](/quartile.go?s=1320:1368#L65) +``` go +func Trimean(input Float64Data) (float64, error) +``` +Trimean finds the average of the median and the midhinge + + + +## func [VarP](/legacy.go?s=59:113#L4) +``` go +func VarP(input Float64Data) (sdev float64, err error) +``` +VarP is a shortcut to PopulationVariance + + + +## func [VarS](/legacy.go?s=193:247#L9) +``` go +func VarS(input Float64Data) (sdev float64, err error) +``` +VarS is a shortcut to SampleVariance + + + +## func [Variance](/variance.go?s=659:717#L26) +``` go +func Variance(input Float64Data) (sdev float64, err error) +``` +Variance the amount of variation in the dataset + + + + +## type [Coordinate](/regression.go?s=143:183#L9) +``` go +type Coordinate struct { + X, Y float64 +} + +``` +Coordinate holds the data in a series + + + + + + + +### func [ExpReg](/legacy.go?s=791:856#L29) +``` go +func ExpReg(s []Coordinate) (regressions []Coordinate, err error) +``` +ExpReg is a shortcut to ExponentialRegression + + +### func [LinReg](/legacy.go?s=643:708#L24) +``` go +func LinReg(s []Coordinate) (regressions []Coordinate, err error) +``` +LinReg is a shortcut to LinearRegression + + +### func [LogReg](/legacy.go?s=944:1009#L34) +``` go +func LogReg(s []Coordinate) (regressions []Coordinate, err error) +``` +LogReg is a shortcut to LogarithmicRegression + + + + + +## type [Float64Data](/data.go?s=80:106#L4) +``` go +type Float64Data []float64 +``` +Float64Data is a named type for []float64 with helper methods + + + + + + + +### func [LoadRawData](/load.go?s=119:168#L9) +``` go +func LoadRawData(raw interface{}) (f Float64Data) +``` +LoadRawData parses and converts a slice of mixed data types to floats + + + + + +### func (Float64Data) [AutoCorrelation](/data.go?s=3257:3320#L91) +``` go +func (f Float64Data) AutoCorrelation(lags int) (float64, error) +``` +AutoCorrelation is the correlation of a signal with a delayed copy of itself as a function of delay + + + + +### func (Float64Data) [Correlation](/data.go?s=3058:3122#L86) +``` go +func (f Float64Data) Correlation(d Float64Data) (float64, error) +``` +Correlation describes the degree of relationship between two sets of data + + + + +### func (Float64Data) [Covariance](/data.go?s=4801:4864#L141) +``` go +func (f Float64Data) Covariance(d Float64Data) (float64, error) +``` +Covariance is a measure of 
how much two sets of data change + + + + +### func (Float64Data) [CovariancePopulation](/data.go?s=4983:5056#L146) +``` go +func (f Float64Data) CovariancePopulation(d Float64Data) (float64, error) +``` +CovariancePopulation computes covariance for entire population between two variables + + + + +### func (Float64Data) [CumulativeSum](/data.go?s=883:938#L28) +``` go +func (f Float64Data) CumulativeSum() ([]float64, error) +``` +CumulativeSum returns the cumulative sum of the data + + + + +### func (Float64Data) [Entropy](/data.go?s=5480:5527#L162) +``` go +func (f Float64Data) Entropy() (float64, error) +``` +Entropy provides calculation of the entropy + + + + +### func (Float64Data) [GeometricMean](/data.go?s=1332:1385#L40) +``` go +func (f Float64Data) GeometricMean() (float64, error) +``` +GeometricMean returns the geometric mean of the data + + + + +### func (Float64Data) [Get](/data.go?s=129:168#L7) +``` go +func (f Float64Data) Get(i int) float64 +``` +Get item in slice + + + + +### func (Float64Data) [HarmonicMean](/data.go?s=1460:1512#L43) +``` go +func (f Float64Data) HarmonicMean() (float64, error) +``` +HarmonicMean returns the harmonic mean of the data + + + + +### func (Float64Data) [InterQuartileRange](/data.go?s=3755:3813#L106) +``` go +func (f Float64Data) InterQuartileRange() (float64, error) +``` +InterQuartileRange finds the range between Q1 and Q3 + + + + +### func (Float64Data) [Len](/data.go?s=217:247#L10) +``` go +func (f Float64Data) Len() int +``` +Len returns length of slice + + + + +### func (Float64Data) [Less](/data.go?s=318:358#L13) +``` go +func (f Float64Data) Less(i, j int) bool +``` +Less reports whether one number is less than another + + + + +### func (Float64Data) [Max](/data.go?s=645:688#L22) +``` go +func (f Float64Data) Max() (float64, error) +``` +Max returns the maximum number in the data + + + + +### func (Float64Data) [Mean](/data.go?s=1005:1049#L31) +``` go +func (f Float64Data) Mean() (float64, error) +``` +Mean returns the mean of the data + + + + +### func (Float64Data) [Median](/data.go?s=1111:1157#L34) +``` go +func (f Float64Data) Median() (float64, error) +``` +Median returns the median of the data + + + + +### func (Float64Data) [MedianAbsoluteDeviation](/data.go?s=1630:1693#L46) +``` go +func (f Float64Data) MedianAbsoluteDeviation() (float64, error) +``` +MedianAbsoluteDeviation the median of the absolute deviations from the dataset median + + + + +### func (Float64Data) [MedianAbsoluteDeviationPopulation](/data.go?s=1842:1915#L51) +``` go +func (f Float64Data) MedianAbsoluteDeviationPopulation() (float64, error) +``` +MedianAbsoluteDeviationPopulation finds the median of the absolute deviations from the population median + + + + +### func (Float64Data) [Midhinge](/data.go?s=3912:3973#L111) +``` go +func (f Float64Data) Midhinge(d Float64Data) (float64, error) +``` +Midhinge finds the average of the first and third quartiles + + + + +### func (Float64Data) [Min](/data.go?s=536:579#L19) +``` go +func (f Float64Data) Min() (float64, error) +``` +Min returns the minimum number in the data + + + + +### func (Float64Data) [Mode](/data.go?s=1217:1263#L37) +``` go +func (f Float64Data) Mode() ([]float64, error) +``` +Mode returns the mode of the data + + + + +### func (Float64Data) [Pearson](/data.go?s=3455:3515#L96) +``` go +func (f Float64Data) Pearson(d Float64Data) (float64, error) +``` +Pearson calculates the Pearson product-moment correlation coefficient between two variables.
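+A short sketch of the method form of this API, using LoadRawData from above to build the Float64Data (errors elided for brevity):
+
+``` go
+package main
+
+import (
+	"fmt"
+
+	"github.com/montanaflynn/stats"
+)
+
+func main() {
+	// Mixed raw input is converted to floats by LoadRawData.
+	d := stats.LoadRawData([]interface{}{1, "2", 3.5, 4})
+
+	mean, _ := d.Mean()
+	median, _ := d.Median()
+	mode, _ := d.Mode()
+
+	fmt.Println(mean, median, mode)
+}
+```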
+ + + +### func (Float64Data) [Percentile](/data.go?s=2696:2755#L76) +``` go +func (f Float64Data) Percentile(p float64) (float64, error) +``` +Percentile finds the relative standing in a slice of floats + + + +### func (Float64Data) [PercentileNearestRank](/data.go?s=2869:2939#L81) +``` go +func (f Float64Data) PercentileNearestRank(p float64) (float64, error) +``` +PercentileNearestRank finds the relative standing using the Nearest Rank method + + + +### func (Float64Data) [PopulationVariance](/data.go?s=4495:4553#L131) +``` go +func (f Float64Data) PopulationVariance() (float64, error) +``` +PopulationVariance finds the amount of variance within a population + + + +### func (Float64Data) [Quartile](/data.go?s=3610:3673#L101) +``` go +func (f Float64Data) Quartile(d Float64Data) (Quartiles, error) +``` +Quartile returns the three quartile points from a slice of data + + + +### func (Float64Data) [QuartileOutliers](/data.go?s=2542:2599#L71) +``` go +func (f Float64Data) QuartileOutliers() (Outliers, error) +``` +QuartileOutliers finds the mild and extreme outliers + + + +### func (Float64Data) [Quartiles](/data.go?s=5628:5679#L167) +``` go +func (f Float64Data) Quartiles() (Quartiles, error) +``` +Quartiles returns the three quartile points from an instance of Float64Data + + + +### func (Float64Data) [Sample](/data.go?s=4208:4269#L121) +``` go +func (f Float64Data) Sample(n int, r bool) ([]float64, error) +``` +Sample returns a sample from the input, with or without replacement + + + +### func (Float64Data) [SampleVariance](/data.go?s=4652:4706#L136) +``` go +func (f Float64Data) SampleVariance() (float64, error) +``` +SampleVariance finds the amount of variance within a sample + + + +### func (Float64Data) [Sigmoid](/data.go?s=5169:5218#L151) +``` go +func (f Float64Data) Sigmoid() ([]float64, error) +``` +Sigmoid returns the input values along the sigmoid or s-shaped curve + + + +### func (Float64Data) [SoftMax](/data.go?s=5359:5408#L157) +``` go +func (f Float64Data) SoftMax() ([]float64, error) +``` +SoftMax returns the input values in the range of 0 to 1 +with the sum of all the probabilities being equal to one.
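+ +Likewise, a hedged sketch of the quartile helpers; the expected values follow from the cutoffs implemented in quartile.go later in this patch (an even-length input is split in half, and each half's median becomes Q1 or Q3): + +``` go +package main + +import ( + "fmt" + + "github.com/montanaflynn/stats" +) + +func main() { + data := stats.Float64Data{1, 2, 3, 4, 5, 6, 7, 8} + + q, _ := data.Quartiles() // Quartiles{Q1: 2.5, Q2: 4.5, Q3: 6.5} + iqr, _ := data.InterQuartileRange() // Q3 - Q1 = 4 + + fmt.Println(q.Q1, q.Q2, q.Q3, iqr) // 2.5 4.5 6.5 4 +} +```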
+ + + +### func (Float64Data) [StandardDeviation](/data.go?s=2026:2083#L56) +``` go +func (f Float64Data) StandardDeviation() (float64, error) +``` +StandardDeviation finds the amount of variation in the dataset + + + +### func (Float64Data) [StandardDeviationPopulation](/data.go?s=2199:2266#L61) +``` go +func (f Float64Data) StandardDeviationPopulation() (float64, error) +``` +StandardDeviationPopulation finds the amount of variation from the population + + + +### func (Float64Data) [StandardDeviationSample](/data.go?s=2382:2445#L66) +``` go +func (f Float64Data) StandardDeviationSample() (float64, error) +``` +StandardDeviationSample finds the amount of variation from a sample + + + +### func (Float64Data) [Sum](/data.go?s=764:807#L25) +``` go +func (f Float64Data) Sum() (float64, error) +``` +Sum returns the total of all the numbers in the data + + + +### func (Float64Data) [Swap](/data.go?s=425:460#L16) +``` go +func (f Float64Data) Swap(i, j int) +``` +Swap switches out two numbers in slice + + + +### func (Float64Data) [Trimean](/data.go?s=4059:4119#L116) +``` go +func (f Float64Data) Trimean(d Float64Data) (float64, error) +``` +Trimean finds the average of the median and the midhinge + + + +### func (Float64Data) [Variance](/data.go?s=4350:4398#L126) +``` go +func (f Float64Data) Variance() (float64, error) +``` +Variance finds the amount of variation in the dataset + + + + +## type [Outliers](/outlier.go?s=73:139#L4) +``` go +type Outliers struct { + Mild Float64Data + Extreme Float64Data +} + +``` +Outliers holds mild and extreme outliers found in data + + + + + + + +### func [QuartileOutliers](/outlier.go?s=197:255#L10) +``` go +func QuartileOutliers(input Float64Data) (Outliers, error) +``` +QuartileOutliers finds the mild and extreme outliers + + + + + +## type [Quartiles](/quartile.go?s=75:136#L6) +``` go +type Quartiles struct { + Q1 float64 + Q2 float64 + Q3 float64 +} + +``` +Quartiles holds the three quartile points + + + + + + + +### func [Quartile](/quartile.go?s=205:256#L13) +``` go +func Quartile(input Float64Data) (Quartiles, error) +``` +Quartile returns the three quartile points from a slice of data + + + + + +## type [Series](/regression.go?s=76:100#L6) +``` go +type Series []Coordinate +``` +Series is a container for a series of data + + + + + + + +### func [ExponentialRegression](/regression.go?s=1089:1157#L50) +``` go +func ExponentialRegression(s Series) (regressions Series, err error) +``` +ExponentialRegression returns an exponential regression on data series + + +### func [LinearRegression](/regression.go?s=262:325#L14) +``` go +func LinearRegression(s Series) (regressions Series, err error) +``` +LinearRegression finds the least squares linear regression on data series + + +### func [LogarithmicRegression](/regression.go?s=1903:1971#L85) +``` go +func LogarithmicRegression(s Series) (regressions Series, err error) +``` +LogarithmicRegression returns a logarithmic regression on data series + + + + + + + + + +- - - +Generated by [godoc2md](http://godoc.org/github.com/davecheney/godoc2md) diff --git a/vendor/github.com/montanaflynn/stats/LICENSE b/vendor/github.com/montanaflynn/stats/LICENSE new file mode 100644 index 00000000..15909612 --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014-2020 Montana Flynn (https://montanaflynn.com) + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"),
to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/montanaflynn/stats/Makefile b/vendor/github.com/montanaflynn/stats/Makefile new file mode 100644 index 00000000..969df128 --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/Makefile @@ -0,0 +1,34 @@ +.PHONY: all + +default: test lint + +format: + go fmt . + +test: + go test -race + +check: format test + +benchmark: + go test -bench=. -benchmem + +coverage: + go test -coverprofile=coverage.out + go tool cover -html="coverage.out" + +lint: format + golangci-lint run . + +docs: + godoc2md github.com/montanaflynn/stats | sed -e s#src/target/##g > DOCUMENTATION.md + +release: + git-chglog --output CHANGELOG.md --next-tag ${TAG} + git add CHANGELOG.md + git commit -m "Update changelog with ${TAG} changes" + git tag ${TAG} + git-chglog $(TAG) | tail -n +4 | gsed '1s/^/$(TAG)\n/gm' > release-notes.txt + git push origin master ${TAG} + hub release create --copy -F release-notes.txt ${TAG} + diff --git a/vendor/github.com/montanaflynn/stats/README.md b/vendor/github.com/montanaflynn/stats/README.md new file mode 100644 index 00000000..4495c8dd --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/README.md @@ -0,0 +1,228 @@ +# Stats - Golang Statistics Package + +[![][travis-svg]][travis-url] [![][coveralls-svg]][coveralls-url] [![][goreport-svg]][goreport-url] [![][godoc-svg]][godoc-url] [![][pkggodev-svg]][pkggodev-url] [![][license-svg]][license-url] + +A well tested and comprehensive Golang statistics library / package / module with no dependencies. + +If you have any suggestions, problems, or bug reports, please [create an issue](https://github.com/montanaflynn/stats/issues) and I'll do my best to accommodate you. In addition, simply starring the repo would show your support for the project and would be very much appreciated! + +## Installation + +``` +go get github.com/montanaflynn/stats +``` + +## Example Usage + +All the functions can be seen in [examples/main.go](examples/main.go) but here's a little taste (a hedged `LoadRawData` sketch also appears after the documentation links below): + +```go +// start with some source data to use +data := []float64{1.0, 2.1, 3.2, 4.823, 4.1, 5.8} + +// you could also use different types like this +// data := stats.LoadRawData([]int{1, 2, 3, 4, 5}) +// data := stats.LoadRawData([]interface{}{1.1, "2", 3}) +// etc... + +median, _ := stats.Median(data) +fmt.Println(median) // 3.65 + +roundedMedian, _ := stats.Round(median, 0) +fmt.Println(roundedMedian) // 4 +``` + +## Documentation + +The entire API documentation is available on [GoDoc.org](http://godoc.org/github.com/montanaflynn/stats) or [pkg.go.dev](https://pkg.go.dev/github.com/montanaflynn/stats).
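+ +Expanding on the `LoadRawData` comment in the example above, here is a hedged sketch of loading mixed input types (per load.go later in this patch, booleans become 0 or 1 and unparseable values are silently skipped): + +```go +package main + +import ( + "fmt" + + "github.com/montanaflynn/stats" +) + +func main() { + // Mixed types are coerced to float64: the string "2.5" is parsed + // and true becomes 1. + f := stats.LoadRawData([]interface{}{1, "2.5", true, 4.5}) + fmt.Println(f) // [1 2.5 1 4.5] + + // Typed slices work too. + fmt.Println(stats.LoadRawData([]int{1, 2, 3})) // [1 2 3] +} +```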
+ +You can also view docs offline with the following commands: + +``` +# Command line +godoc . # show all exported APIs +godoc . Median # show a single function +godoc -ex . Round # show function with example +godoc . Float64Data # show the type and methods + +# Local website +godoc -http=:4444 # start the godoc server on port 4444 +open http://localhost:4444/pkg/github.com/montanaflynn/stats/ +``` + +The exported API is as follows: + +```go +var ( + ErrEmptyInput = statsError{"Input must not be empty."} + ErrNaN = statsError{"Not a number."} + ErrNegative = statsError{"Must not contain negative values."} + ErrZero = statsError{"Must not contain zero values."} + ErrBounds = statsError{"Input is outside of range."} + ErrSize = statsError{"Must be the same length."} + ErrInfValue = statsError{"Value is infinite."} + ErrYCoord = statsError{"Y Value must be greater than zero."} +) + +func Round(input float64, places int) (rounded float64, err error) {} + +type Float64Data []float64 + +func LoadRawData(raw interface{}) (f Float64Data) {} + +func AutoCorrelation(data Float64Data, lags int) (float64, error) {} +func ChebyshevDistance(dataPointX, dataPointY Float64Data) (distance float64, err error) {} +func Correlation(data1, data2 Float64Data) (float64, error) {} +func Covariance(data1, data2 Float64Data) (float64, error) {} +func CovariancePopulation(data1, data2 Float64Data) (float64, error) {} +func CumulativeSum(input Float64Data) ([]float64, error) {} +func Entropy(input Float64Data) (float64, error) {} +func EuclideanDistance(dataPointX, dataPointY Float64Data) (distance float64, err error) {} +func GeometricMean(input Float64Data) (float64, error) {} +func HarmonicMean(input Float64Data) (float64, error) {} +func InterQuartileRange(input Float64Data) (float64, error) {} +func ManhattanDistance(dataPointX, dataPointY Float64Data) (distance float64, err error) {} +func Max(input Float64Data) (max float64, err error) {} +func Mean(input Float64Data) (float64, error) {} +func Median(input Float64Data) (median float64, err error) {} +func MedianAbsoluteDeviation(input Float64Data) (mad float64, err error) {} +func MedianAbsoluteDeviationPopulation(input Float64Data) (mad float64, err error) {} +func Midhinge(input Float64Data) (float64, error) {} +func Min(input Float64Data) (min float64, err error) {} +func MinkowskiDistance(dataPointX, dataPointY Float64Data, lambda float64) (distance float64, err error) {} +func Mode(input Float64Data) (mode []float64, err error) {} +func NormBoxMullerRvs(loc float64, scale float64, size int) []float64 {} +func NormCdf(x float64, loc float64, scale float64) float64 {} +func NormEntropy(loc float64, scale float64) float64 {} +func NormFit(data []float64) [2]float64 {} +func NormInterval(alpha float64, loc float64, scale float64) [2]float64 {} +func NormIsf(p float64, loc float64, scale float64) (x float64) {} +func NormLogCdf(x float64, loc float64, scale float64) float64 {} +func NormLogPdf(x float64, loc float64, scale float64) float64 {} +func NormLogSf(x float64, loc float64, scale float64) float64 {} +func NormMean(loc float64, scale float64) float64 {} +func NormMedian(loc float64, scale float64) float64 {} +func NormMoment(n int, loc float64, scale float64) float64 {} +func NormPdf(x float64, loc float64, scale float64) float64 {} +func NormPpf(p float64, loc float64, scale float64) (x float64) {} +func NormPpfRvs(loc float64, scale float64, size int) []float64 {} +func NormSf(x float64, loc float64, scale float64) float64 {} +func NormStats(loc
float64, scale float64, moments string) []float64 {} +func NormStd(loc float64, scale float64) float64 {} +func NormVar(loc float64, scale float64) float64 {} +func Pearson(data1, data2 Float64Data) (float64, error) {} +func Percentile(input Float64Data, percent float64) (percentile float64, err error) {} +func PercentileNearestRank(input Float64Data, percent float64) (percentile float64, err error) {} +func PopulationVariance(input Float64Data) (pvar float64, err error) {} +func Sample(input Float64Data, takenum int, replacement bool) ([]float64, error) {} +func SampleVariance(input Float64Data) (svar float64, err error) {} +func Sigmoid(input Float64Data) ([]float64, error) {} +func SoftMax(input Float64Data) ([]float64, error) {} +func StableSample(input Float64Data, takenum int) ([]float64, error) {} +func StandardDeviation(input Float64Data) (sdev float64, err error) {} +func StandardDeviationPopulation(input Float64Data) (sdev float64, err error) {} +func StandardDeviationSample(input Float64Data) (sdev float64, err error) {} +func StdDevP(input Float64Data) (sdev float64, err error) {} +func StdDevS(input Float64Data) (sdev float64, err error) {} +func Sum(input Float64Data) (sum float64, err error) {} +func Trimean(input Float64Data) (float64, error) {} +func VarP(input Float64Data) (sdev float64, err error) {} +func VarS(input Float64Data) (sdev float64, err error) {} +func Variance(input Float64Data) (sdev float64, err error) {} + +type Coordinate struct { + X, Y float64 +} + +type Series []Coordinate + +func ExponentialRegression(s Series) (regressions Series, err error) {} +func LinearRegression(s Series) (regressions Series, err error) {} +func LogarithmicRegression(s Series) (regressions Series, err error) {} + +type Outliers struct { + Mild Float64Data + Extreme Float64Data +} + +type Quartiles struct { + Q1 float64 + Q2 float64 + Q3 float64 +} + +func Quartile(input Float64Data) (Quartiles, error) {} +func QuartileOutliers(input Float64Data) (Outliers, error) {} +``` + +## Contributing + +Pull requests are always welcome no matter how big or small. I've included a [Makefile](https://github.com/montanaflynn/stats/blob/master/Makefile) that has a lot of helper targets for common actions such as linting, testing, code coverage reporting and more. + +1. Fork the repo and clone your fork +2. Create new branch (`git checkout -b some-thing`) +3. Make the desired changes +4. Ensure tests pass (`go test -cover` or `make test`) +5. Run lint and fix problems (`go vet .` or `make lint`) +6. Commit changes (`git commit -am 'Did something'`) +7. Push branch (`git push origin some-thing`) +8. Submit pull request + +To make things as seamless as possible, please also consider the following steps: + +- Update `examples/main.go` with a simple example of the new feature +- Update `README.md` documentation section with any new exported API +- Keep 100% code coverage (you can check with `make coverage`) +- Squash commits into single units of work with `git rebase -i new-feature` + +## Releasing + +To release a new version, we should update the [CHANGELOG.md](/CHANGELOG.md) and [DOCUMENTATION.md](/DOCUMENTATION.md).
+ +First install the tools used to generate the markdown files: + +``` +go get github.com/davecheney/godoc2md +go get github.com/golangci/golangci-lint/cmd/golangci-lint +``` + +Then you can run these `make` directives: + +``` +# Generate DOCUMENTATION.md +make docs +``` + +Then we can create a [CHANGELOG.md](/CHANGELOG.md), a new git tag and a GitHub release: + +``` +make release TAG=v0.x.x +``` + +## MIT License + +Copyright (c) 2014-2021 Montana Flynn (https://montanaflynn.com) + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +[travis-url]: https://travis-ci.org/montanaflynn/stats +[travis-svg]: https://img.shields.io/travis/montanaflynn/stats.svg + +[coveralls-url]: https://coveralls.io/r/montanaflynn/stats?branch=master +[coveralls-svg]: https://img.shields.io/coveralls/montanaflynn/stats.svg + +[goreport-url]: https://goreportcard.com/report/github.com/montanaflynn/stats +[goreport-svg]: https://goreportcard.com/badge/github.com/montanaflynn/stats + +[godoc-url]: https://godoc.org/github.com/montanaflynn/stats +[godoc-svg]: https://godoc.org/github.com/montanaflynn/stats?status.svg + +[pkggodev-url]: https://pkg.go.dev/github.com/montanaflynn/stats +[pkggodev-svg]: https://gistcdn.githack.com/montanaflynn/b02f1d78d8c0de8435895d7e7cd0d473/raw/17f2a5a69f1323ecd42c00e0683655da96d9ecc8/badge.svg + +[license-url]: https://github.com/montanaflynn/stats/blob/master/LICENSE +[license-svg]: https://img.shields.io/badge/license-MIT-blue.svg diff --git a/vendor/github.com/montanaflynn/stats/correlation.go b/vendor/github.com/montanaflynn/stats/correlation.go new file mode 100644 index 00000000..4acab94d --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/correlation.go @@ -0,0 +1,60 @@ +package stats + +import ( + "math" +) + +// Correlation describes the degree of relationship between two sets of data +func Correlation(data1, data2 Float64Data) (float64, error) { + + l1 := data1.Len() + l2 := data2.Len() + + if l1 == 0 || l2 == 0 { + return math.NaN(), EmptyInputErr + } + + if l1 != l2 { + return math.NaN(), SizeErr + } + + sdev1, _ := StandardDeviationPopulation(data1) + sdev2, _ := StandardDeviationPopulation(data2) + + if sdev1 == 0 || sdev2 == 0 { + return 0, nil + } + + covp, _ := CovariancePopulation(data1, data2) + return covp / (sdev1 * sdev2), nil +} + +// Pearson calculates the Pearson product-moment correlation coefficient between two variables +func Pearson(data1, data2 Float64Data) (float64, error) { + return Correlation(data1, data2) +} + +// AutoCorrelation is the correlation of a signal with a delayed
copy of itself as a function of delay +func AutoCorrelation(data Float64Data, lags int) (float64, error) { + if len(data) < 1 { + return 0, EmptyInputErr + } + + mean, _ := Mean(data) + + var result, q float64 + + for i := 0; i < lags; i++ { + v := (data[0] - mean) * (data[0] - mean) + for i := 1; i < len(data); i++ { + delta0 := data[i-1] - mean + delta1 := data[i] - mean + q += (delta0*delta1 - q) / float64(i+1) + v += (delta1*delta1 - v) / float64(i+1) + } + + result = q / v + } + + return result, nil +} diff --git a/vendor/github.com/montanaflynn/stats/cumulative_sum.go b/vendor/github.com/montanaflynn/stats/cumulative_sum.go new file mode 100644 index 00000000..e5305daf --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/cumulative_sum.go @@ -0,0 +1,21 @@ +package stats + +// CumulativeSum calculates the cumulative sum of the input slice +func CumulativeSum(input Float64Data) ([]float64, error) { + + if input.Len() == 0 { + return Float64Data{}, EmptyInput + } + + cumSum := make([]float64, input.Len()) + + for i, val := range input { + if i == 0 { + cumSum[i] = val + } else { + cumSum[i] = cumSum[i-1] + val + } + } + + return cumSum, nil +} diff --git a/vendor/github.com/montanaflynn/stats/data.go b/vendor/github.com/montanaflynn/stats/data.go new file mode 100644 index 00000000..b86f0d84 --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/data.go @@ -0,0 +1,169 @@ +package stats + +// Float64Data is a named type for []float64 with helper methods +type Float64Data []float64 + +// Get item in slice +func (f Float64Data) Get(i int) float64 { return f[i] } + +// Len returns length of slice +func (f Float64Data) Len() int { return len(f) } + +// Less returns whether one number is less than another +func (f Float64Data) Less(i, j int) bool { return f[i] < f[j] } + +// Swap switches out two numbers in slice +func (f Float64Data) Swap(i, j int) { f[i], f[j] = f[j], f[i] } + +// Min returns the minimum number in the data +func (f Float64Data) Min() (float64, error) { return Min(f) } + +// Max returns the maximum number in the data +func (f Float64Data) Max() (float64, error) { return Max(f) } + +// Sum returns the total of all the numbers in the data +func (f Float64Data) Sum() (float64, error) { return Sum(f) } + +// CumulativeSum returns the cumulative sum of the data +func (f Float64Data) CumulativeSum() ([]float64, error) { return CumulativeSum(f) } + +// Mean returns the mean of the data +func (f Float64Data) Mean() (float64, error) { return Mean(f) } + +// Median returns the median of the data +func (f Float64Data) Median() (float64, error) { return Median(f) } + +// Mode returns the mode of the data +func (f Float64Data) Mode() ([]float64, error) { return Mode(f) } + +// GeometricMean returns the geometric mean of the data +func (f Float64Data) GeometricMean() (float64, error) { return GeometricMean(f) } + +// HarmonicMean returns the harmonic mean of the data +func (f Float64Data) HarmonicMean() (float64, error) { return HarmonicMean(f) } + +// MedianAbsoluteDeviation finds the median of the absolute deviations from the dataset median +func (f Float64Data) MedianAbsoluteDeviation() (float64, error) { + return MedianAbsoluteDeviation(f) +} + +// MedianAbsoluteDeviationPopulation finds the median of the absolute deviations from the population median +func (f Float64Data) MedianAbsoluteDeviationPopulation() (float64, error) { + return MedianAbsoluteDeviationPopulation(f) +} + +// StandardDeviation finds the amount of variation in the dataset +func (f Float64Data) StandardDeviation() (float64, error) { + return
StandardDeviation(f) +} + +// StandardDeviationPopulation finds the amount of variation from the population +func (f Float64Data) StandardDeviationPopulation() (float64, error) { + return StandardDeviationPopulation(f) +} + +// StandardDeviationSample finds the amount of variation from a sample +func (f Float64Data) StandardDeviationSample() (float64, error) { + return StandardDeviationSample(f) +} + +// QuartileOutliers finds the mild and extreme outliers +func (f Float64Data) QuartileOutliers() (Outliers, error) { + return QuartileOutliers(f) +} + +// Percentile finds the relative standing in a slice of floats +func (f Float64Data) Percentile(p float64) (float64, error) { + return Percentile(f, p) +} + +// PercentileNearestRank finds the relative standing using the Nearest Rank method +func (f Float64Data) PercentileNearestRank(p float64) (float64, error) { + return PercentileNearestRank(f, p) +} + +// Correlation describes the degree of relationship between two sets of data +func (f Float64Data) Correlation(d Float64Data) (float64, error) { + return Correlation(f, d) +} + +// AutoCorrelation is the correlation of a signal with a delayed copy of itself as a function of delay +func (f Float64Data) AutoCorrelation(lags int) (float64, error) { + return AutoCorrelation(f, lags) +} + +// Pearson calculates the Pearson product-moment correlation coefficient between two variables. +func (f Float64Data) Pearson(d Float64Data) (float64, error) { + return Pearson(f, d) +} + +// Quartile returns the three quartile points from a slice of data +func (f Float64Data) Quartile(d Float64Data) (Quartiles, error) { + return Quartile(d) +} + +// InterQuartileRange finds the range between Q1 and Q3 +func (f Float64Data) InterQuartileRange() (float64, error) { + return InterQuartileRange(f) +} + +// Midhinge finds the average of the first and third quartiles +func (f Float64Data) Midhinge(d Float64Data) (float64, error) { + return Midhinge(d) +} + +// Trimean finds the average of the median and the midhinge +func (f Float64Data) Trimean(d Float64Data) (float64, error) { + return Trimean(d) +} + +// Sample returns a sample from the input, with or without replacement +func (f Float64Data) Sample(n int, r bool) ([]float64, error) { + return Sample(f, n, r) +} + +// Variance finds the amount of variation in the dataset +func (f Float64Data) Variance() (float64, error) { + return Variance(f) +} + +// PopulationVariance finds the amount of variance within a population +func (f Float64Data) PopulationVariance() (float64, error) { + return PopulationVariance(f) +} + +// SampleVariance finds the amount of variance within a sample +func (f Float64Data) SampleVariance() (float64, error) { + return SampleVariance(f) +} + +// Covariance is a measure of how much two sets of data change together +func (f Float64Data) Covariance(d Float64Data) (float64, error) { + return Covariance(f, d) +} + +// CovariancePopulation computes covariance for entire population between two variables +func (f Float64Data) CovariancePopulation(d Float64Data) (float64, error) { + return CovariancePopulation(f, d) +} + +// Sigmoid returns the input values along the sigmoid or s-shaped curve +func (f Float64Data) Sigmoid() ([]float64, error) { + return Sigmoid(f) +} + +// SoftMax returns the input values in the range of 0 to 1 +// with the sum of all the probabilities being equal to one.
+func (f Float64Data) SoftMax() ([]float64, error) { + return SoftMax(f) +} + +// Entropy calculates the entropy of the data +func (f Float64Data) Entropy() (float64, error) { + return Entropy(f) +} + +// Quartiles returns the three quartile points from an instance of Float64Data +func (f Float64Data) Quartiles() (Quartiles, error) { + return Quartile(f) +} diff --git a/vendor/github.com/montanaflynn/stats/deviation.go b/vendor/github.com/montanaflynn/stats/deviation.go new file mode 100644 index 00000000..e69a19f6 --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/deviation.go @@ -0,0 +1,57 @@ +package stats + +import "math" + +// MedianAbsoluteDeviation finds the median of the absolute deviations from the dataset median +func MedianAbsoluteDeviation(input Float64Data) (mad float64, err error) { + return MedianAbsoluteDeviationPopulation(input) +} + +// MedianAbsoluteDeviationPopulation finds the median of the absolute deviations from the population median +func MedianAbsoluteDeviationPopulation(input Float64Data) (mad float64, err error) { + if input.Len() == 0 { + return math.NaN(), EmptyInputErr + } + + i := copyslice(input) + m, _ := Median(i) + + for key, value := range i { + i[key] = math.Abs(value - m) + } + + return Median(i) +} + +// StandardDeviation finds the amount of variation in the dataset +func StandardDeviation(input Float64Data) (sdev float64, err error) { + return StandardDeviationPopulation(input) +} + +// StandardDeviationPopulation finds the amount of variation from the population +func StandardDeviationPopulation(input Float64Data) (sdev float64, err error) { + + if input.Len() == 0 { + return math.NaN(), EmptyInputErr + } + + // Get the population variance + vp, _ := PopulationVariance(input) + + // Return the population standard deviation + return math.Sqrt(vp), nil +} + +// StandardDeviationSample finds the amount of variation from a sample +func StandardDeviationSample(input Float64Data) (sdev float64, err error) { + + if input.Len() == 0 { + return math.NaN(), EmptyInputErr + } + + // Get the sample variance + vs, _ := SampleVariance(input) + + // Return the sample standard deviation + return math.Sqrt(vs), nil +} diff --git a/vendor/github.com/montanaflynn/stats/distances.go b/vendor/github.com/montanaflynn/stats/distances.go new file mode 100644 index 00000000..c2b7d8f8 --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/distances.go @@ -0,0 +1,88 @@ +package stats + +import ( + "math" +) + +// Validate data for distance calculation +func validateData(dataPointX, dataPointY Float64Data) error { + if len(dataPointX) == 0 || len(dataPointY) == 0 { + return EmptyInputErr + } + + if len(dataPointX) != len(dataPointY) { + return SizeErr + } + return nil +} + +// ChebyshevDistance computes the Chebyshev distance between two data sets +func ChebyshevDistance(dataPointX, dataPointY Float64Data) (distance float64, err error) { + err = validateData(dataPointX, dataPointY) + if err != nil { + return math.NaN(), err + } + var tempDistance float64 + for i := 0; i < len(dataPointY); i++ { + tempDistance = math.Abs(dataPointX[i] - dataPointY[i]) + if distance < tempDistance { + distance = tempDistance + } + } + return distance, nil +} + +// EuclideanDistance computes the Euclidean distance between two data sets +func EuclideanDistance(dataPointX, dataPointY Float64Data) (distance float64, err error) { + + err = validateData(dataPointX, dataPointY) + if err != nil { + return math.NaN(), err + } + distance = 0 + for i := 0; i < len(dataPointX); i++ { + distance = distance
+ ((dataPointX[i] - dataPointY[i]) * (dataPointX[i] - dataPointY[i])) + } + return math.Sqrt(distance), nil +} + +// ManhattanDistance computes the Manhattan distance between two data sets +func ManhattanDistance(dataPointX, dataPointY Float64Data) (distance float64, err error) { + err = validateData(dataPointX, dataPointY) + if err != nil { + return math.NaN(), err + } + distance = 0 + for i := 0; i < len(dataPointX); i++ { + distance = distance + math.Abs(dataPointX[i]-dataPointY[i]) + } + return distance, nil +} + +// MinkowskiDistance computes the Minkowski distance between two data sets +// +// Arguments: +// dataPointX: First set of data points +// dataPointY: Second set of data points. Length of both data +// sets must be equal. +// lambda: aka p or city blocks; with lambda = 1 the +// returned distance is the Manhattan distance, with +// lambda = 2 it is the Euclidean distance, and as +// lambda approaches infinity it becomes the +// Chebyshev distance. +// Return: +// Distance or error +func MinkowskiDistance(dataPointX, dataPointY Float64Data, lambda float64) (distance float64, err error) { + err = validateData(dataPointX, dataPointY) + if err != nil { + return math.NaN(), err + } + for i := 0; i < len(dataPointY); i++ { + distance = distance + math.Pow(math.Abs(dataPointX[i]-dataPointY[i]), lambda) + } + distance = math.Pow(distance, 1/lambda) + if math.IsInf(distance, 1) { + return math.NaN(), InfValue + } + return distance, nil +} diff --git a/vendor/github.com/montanaflynn/stats/doc.go b/vendor/github.com/montanaflynn/stats/doc.go new file mode 100644 index 00000000..facb8d57 --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/doc.go @@ -0,0 +1,23 @@ +/* +Package stats is a well tested and comprehensive +statistics library package with no dependencies. + +Example Usage: + + // start with some source data to use + data := []float64{1.0, 2.1, 3.2, 4.823, 4.1, 5.8} + + // you could also use different types like this + // data := stats.LoadRawData([]int{1, 2, 3, 4, 5}) + // data := stats.LoadRawData([]interface{}{1.1, "2", 3}) + // etc...
+ + median, _ := stats.Median(data) + fmt.Println(median) // 3.65 + + roundedMedian, _ := stats.Round(median, 0) + fmt.Println(roundedMedian) // 4 + +MIT License Copyright (c) 2014-2020 Montana Flynn (https://montanaflynn.com) +*/ +package stats diff --git a/vendor/github.com/montanaflynn/stats/entropy.go b/vendor/github.com/montanaflynn/stats/entropy.go new file mode 100644 index 00000000..95263b0f --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/entropy.go @@ -0,0 +1,31 @@ +package stats + +import "math" + +// Entropy calculates the entropy of the data +func Entropy(input Float64Data) (float64, error) { + input, err := normalize(input) + if err != nil { + return math.NaN(), err + } + var result float64 + for i := 0; i < input.Len(); i++ { + v := input.Get(i) + if v == 0 { + continue + } + result += (v * math.Log(v)) + } + return -result, nil +} + +// normalize scales the input so that it sums to one; note that it mutates the input slice in place +func normalize(input Float64Data) (Float64Data, error) { + sum, err := input.Sum() + if err != nil { + return Float64Data{}, err + } + for i := 0; i < input.Len(); i++ { + input[i] = input[i] / sum + } + return input, nil +} diff --git a/vendor/github.com/montanaflynn/stats/errors.go b/vendor/github.com/montanaflynn/stats/errors.go new file mode 100644 index 00000000..95f82ff7 --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/errors.go @@ -0,0 +1,35 @@ +package stats + +type statsError struct { + err string +} + +func (s statsError) Error() string { + return s.err +} + +func (s statsError) String() string { + return s.err +} + +// These are the package-wide error values. +// All error identification should use these values. +// https://github.com/golang/go/wiki/Errors#naming +var ( + // ErrEmptyInput Input must not be empty + ErrEmptyInput = statsError{"Input must not be empty."} + // ErrNaN Not a number + ErrNaN = statsError{"Not a number."} + // ErrNegative Must not contain negative values + ErrNegative = statsError{"Must not contain negative values."} + // ErrZero Must not contain zero values + ErrZero = statsError{"Must not contain zero values."} + // ErrBounds Input is outside of range + ErrBounds = statsError{"Input is outside of range."} + // ErrSize Must be the same length + ErrSize = statsError{"Must be the same length."} + // ErrInfValue Value is infinite + ErrInfValue = statsError{"Value is infinite."} + // ErrYCoord Y Value must be greater than zero + ErrYCoord = statsError{"Y Value must be greater than zero."} +) diff --git a/vendor/github.com/montanaflynn/stats/legacy.go b/vendor/github.com/montanaflynn/stats/legacy.go new file mode 100644 index 00000000..0f3d1e8b --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/legacy.go @@ -0,0 +1,49 @@ +package stats + +// VarP is a shortcut to PopulationVariance +func VarP(input Float64Data) (sdev float64, err error) { + return PopulationVariance(input) +} + +// VarS is a shortcut to SampleVariance +func VarS(input Float64Data) (sdev float64, err error) { + return SampleVariance(input) +} + +// StdDevP is a shortcut to StandardDeviationPopulation +func StdDevP(input Float64Data) (sdev float64, err error) { + return StandardDeviationPopulation(input) +} + +// StdDevS is a shortcut to StandardDeviationSample +func StdDevS(input Float64Data) (sdev float64, err error) { + return StandardDeviationSample(input) +} + +// LinReg is a shortcut to LinearRegression +func LinReg(s []Coordinate) (regressions []Coordinate, err error) { + return LinearRegression(s) +} + +// ExpReg is a shortcut to ExponentialRegression +func ExpReg(s []Coordinate) (regressions []Coordinate, err error)
{ + return ExponentialRegression(s) +} + +// LogReg is a shortcut to LogarithmicRegression +func LogReg(s []Coordinate) (regressions []Coordinate, err error) { + return LogarithmicRegression(s) +} + +// Legacy error names that didn't start with Err +var ( + EmptyInputErr = ErrEmptyInput + NaNErr = ErrNaN + NegativeErr = ErrNegative + ZeroErr = ErrZero + BoundsErr = ErrBounds + SizeErr = ErrSize + InfValue = ErrInfValue + YCoordErr = ErrYCoord + EmptyInput = ErrEmptyInput +) diff --git a/vendor/github.com/montanaflynn/stats/load.go b/vendor/github.com/montanaflynn/stats/load.go new file mode 100644 index 00000000..0eb0e272 --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/load.go @@ -0,0 +1,199 @@ +package stats + +import ( + "bufio" + "io" + "strconv" + "strings" + "time" +) + +// LoadRawData parses and converts a slice of mixed data types to floats +func LoadRawData(raw interface{}) (f Float64Data) { + var r []interface{} + var s Float64Data + + switch t := raw.(type) { + case []interface{}: + r = t + case []uint: + for _, v := range t { + s = append(s, float64(v)) + } + return s + case []uint8: + for _, v := range t { + s = append(s, float64(v)) + } + return s + case []uint16: + for _, v := range t { + s = append(s, float64(v)) + } + return s + case []uint32: + for _, v := range t { + s = append(s, float64(v)) + } + return s + case []uint64: + for _, v := range t { + s = append(s, float64(v)) + } + return s + case []bool: + for _, v := range t { + if v { + s = append(s, 1.0) + } else { + s = append(s, 0.0) + } + } + return s + case []float64: + return Float64Data(t) + case []int: + for _, v := range t { + s = append(s, float64(v)) + } + return s + case []int8: + for _, v := range t { + s = append(s, float64(v)) + } + return s + case []int16: + for _, v := range t { + s = append(s, float64(v)) + } + return s + case []int32: + for _, v := range t { + s = append(s, float64(v)) + } + return s + case []int64: + for _, v := range t { + s = append(s, float64(v)) + } + return s + case []string: + for _, v := range t { + r = append(r, v) + } + case []time.Duration: + for _, v := range t { + r = append(r, v) + } + case map[int]int: + for i := 0; i < len(t); i++ { + s = append(s, float64(t[i])) + } + return s + case map[int]int8: + for i := 0; i < len(t); i++ { + s = append(s, float64(t[i])) + } + return s + case map[int]int16: + for i := 0; i < len(t); i++ { + s = append(s, float64(t[i])) + } + return s + case map[int]int32: + for i := 0; i < len(t); i++ { + s = append(s, float64(t[i])) + } + return s + case map[int]int64: + for i := 0; i < len(t); i++ { + s = append(s, float64(t[i])) + } + return s + case map[int]string: + for i := 0; i < len(t); i++ { + r = append(r, t[i]) + } + case map[int]uint: + for i := 0; i < len(t); i++ { + s = append(s, float64(t[i])) + } + return s + case map[int]uint8: + for i := 0; i < len(t); i++ { + s = append(s, float64(t[i])) + } + return s + case map[int]uint16: + for i := 0; i < len(t); i++ { + s = append(s, float64(t[i])) + } + return s + case map[int]uint32: + for i := 0; i < len(t); i++ { + s = append(s, float64(t[i])) + } + return s + case map[int]uint64: + for i := 0; i < len(t); i++ { + s = append(s, float64(t[i])) + } + return s + case map[int]bool: + for i := 0; i < len(t); i++ { + if t[i] { + s = append(s, 1.0) + } else { + s = append(s, 0.0) + } + } + return s + case map[int]float64: + for i := 0; i < len(t); i++ { + s = append(s, t[i]) + } + return s + case map[int]time.Duration: + for i := 0; i < len(t); i++ { + r = append(r, t[i]) + } + case 
string: + for _, v := range strings.Fields(t) { + r = append(r, v) + } + case io.Reader: + scanner := bufio.NewScanner(t) + for scanner.Scan() { + l := scanner.Text() + for _, v := range strings.Fields(l) { + r = append(r, v) + } + } + } + + for _, v := range r { + switch t := v.(type) { + case int: + a := float64(t) + f = append(f, a) + case uint: + f = append(f, float64(t)) + case float64: + f = append(f, t) + case string: + fl, err := strconv.ParseFloat(t, 64) + if err == nil { + f = append(f, fl) + } + case bool: + if t { + f = append(f, 1.0) + } else { + f = append(f, 0.0) + } + case time.Duration: + f = append(f, float64(t)) + } + } + return f +} diff --git a/vendor/github.com/montanaflynn/stats/max.go b/vendor/github.com/montanaflynn/stats/max.go new file mode 100644 index 00000000..bb8c83c3 --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/max.go @@ -0,0 +1,26 @@ +package stats + +import ( + "math" +) + +// Max finds the highest number in a slice +func Max(input Float64Data) (max float64, err error) { + + // Return an error if there are no numbers + if input.Len() == 0 { + return math.NaN(), EmptyInputErr + } + + // Get the first value as the starting point + max = input.Get(0) + + // Loop and replace higher values + for i := 1; i < input.Len(); i++ { + if input.Get(i) > max { + max = input.Get(i) + } + } + + return max, nil +} diff --git a/vendor/github.com/montanaflynn/stats/mean.go b/vendor/github.com/montanaflynn/stats/mean.go new file mode 100644 index 00000000..a78d299a --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/mean.go @@ -0,0 +1,60 @@ +package stats + +import "math" + +// Mean gets the average of a slice of numbers +func Mean(input Float64Data) (float64, error) { + + if input.Len() == 0 { + return math.NaN(), EmptyInputErr + } + + sum, _ := input.Sum() + + return sum / float64(input.Len()), nil +} + +// GeometricMean gets the geometric mean for a slice of numbers +func GeometricMean(input Float64Data) (float64, error) { + + l := input.Len() + if l == 0 { + return math.NaN(), EmptyInputErr + } + + // Get the product of all the numbers + var p float64 + for _, n := range input { + if p == 0 { + p = n + } else { + p *= n + } + } + + // Calculate the geometric mean + return math.Pow(p, 1/float64(l)), nil +} + +// HarmonicMean gets the harmonic mean for a slice of numbers +func HarmonicMean(input Float64Data) (float64, error) { + + l := input.Len() + if l == 0 { + return math.NaN(), EmptyInputErr + } + + // Get the sum of all the numbers' reciprocals and return an + // error for values that cannot be included in the harmonic mean + var p float64 + for _, n := range input { + if n < 0 { + return math.NaN(), NegativeErr + } else if n == 0 { + return math.NaN(), ZeroErr + } + p += (1 / n) + } + + return float64(l) / p, nil +} diff --git a/vendor/github.com/montanaflynn/stats/median.go b/vendor/github.com/montanaflynn/stats/median.go new file mode 100644 index 00000000..a678c365 --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/median.go @@ -0,0 +1,25 @@ +package stats + +import "math" + +// Median gets the median number in a slice of numbers +func Median(input Float64Data) (median float64, err error) { + + // Start by sorting a copy of the slice + c := sortedCopy(input) + + // No math is needed if there are no numbers + // For even numbers we add the two middle numbers + // and divide by two using the Mean function + // For odd numbers we just use the middle number + l := len(c) + if l == 0 { + return math.NaN(), EmptyInputErr + } else if l%2 == 0 { + 
median, _ = Mean(c[l/2-1 : l/2+1]) + } else { + median = c[l/2] + } + + return median, nil +} diff --git a/vendor/github.com/montanaflynn/stats/min.go b/vendor/github.com/montanaflynn/stats/min.go new file mode 100644 index 00000000..bf7e70ac --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/min.go @@ -0,0 +1,26 @@ +package stats + +import "math" + +// Min finds the lowest number in a set of data +func Min(input Float64Data) (min float64, err error) { + + // Get the count of numbers in the slice + l := input.Len() + + // Return an error if there are no numbers + if l == 0 { + return math.NaN(), EmptyInputErr + } + + // Get the first value as the starting point + min = input.Get(0) + + // Iterate until done checking for a lower value + for i := 1; i < l; i++ { + if input.Get(i) < min { + min = input.Get(i) + } + } + return min, nil +} diff --git a/vendor/github.com/montanaflynn/stats/mode.go b/vendor/github.com/montanaflynn/stats/mode.go new file mode 100644 index 00000000..a7cf9f7a --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/mode.go @@ -0,0 +1,47 @@ +package stats + +// Mode gets the mode [most frequent value(s)] of a slice of float64s +func Mode(input Float64Data) (mode []float64, err error) { + // Return the input if there's only one number + l := input.Len() + if l == 1 { + return input, nil + } else if l == 0 { + return nil, EmptyInputErr + } + + c := sortedCopyDif(input) + // Traverse sorted array, + // tracking the longest repeating sequence + mode = make([]float64, 5) + cnt, maxCnt := 1, 1 + for i := 1; i < l; i++ { + switch { + case c[i] == c[i-1]: + cnt++ + case cnt == maxCnt && maxCnt != 1: + mode = append(mode, c[i-1]) + cnt = 1 + case cnt > maxCnt: + mode = append(mode[:0], c[i-1]) + maxCnt, cnt = cnt, 1 + default: + cnt = 1 + } + } + switch { + case cnt == maxCnt: + mode = append(mode, c[l-1]) + case cnt > maxCnt: + mode = append(mode[:0], c[l-1]) + maxCnt = cnt + } + + // Since length must be greater than 1, + // check for slices of distinct values + if maxCnt == 1 || len(mode)*maxCnt == l && maxCnt != l { + return Float64Data{}, nil + } + + return mode, nil +} diff --git a/vendor/github.com/montanaflynn/stats/norm.go b/vendor/github.com/montanaflynn/stats/norm.go new file mode 100644 index 00000000..4eb8eb8b --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/norm.go @@ -0,0 +1,254 @@ +package stats + +import ( + "math" + "math/rand" + "strings" + "time" +) + +// NormPpfRvs generates random variates using the Point Percentile Function. +// For more information please visit: https://demonstrations.wolfram.com/TheMethodOfInverseTransforms/ +func NormPpfRvs(loc float64, scale float64, size int) []float64 { + rand.Seed(time.Now().UnixNano()) + var toReturn []float64 + for i := 0; i < size; i++ { + toReturn = append(toReturn, NormPpf(rand.Float64(), loc, scale)) + } + return toReturn +} + +// NormBoxMullerRvs generates random variates using the Box–Muller transform. +// For more information please visit: http://mathworld.wolfram.com/Box-MullerTransformation.html +func NormBoxMullerRvs(loc float64, scale float64, size int) []float64 { + rand.Seed(time.Now().UnixNano()) + var toReturn []float64 + for i := 0; i < int(float64(size/2)+float64(size%2)); i++ { + // u1 and u2 are uniformly distributed random numbers between 0 and 1. + u1 := rand.Float64() + u2 := rand.Float64() + // x1 and x2 are normally distributed random numbers. 
+ x1 := loc + (scale * (math.Sqrt(-2*math.Log(u1)) * math.Cos(2*math.Pi*u2))) + toReturn = append(toReturn, x1) + if (i+1)*2 <= size { + x2 := loc + (scale * (math.Sqrt(-2*math.Log(u1)) * math.Sin(2*math.Pi*u2))) + toReturn = append(toReturn, x2) + } + } + return toReturn +} + +// NormPdf is the probability density function. +func NormPdf(x float64, loc float64, scale float64) float64 { + return (math.Pow(math.E, -(math.Pow(x-loc, 2))/(2*math.Pow(scale, 2)))) / (scale * math.Sqrt(2*math.Pi)) +} + +// NormLogPdf is the log of the probability density function. +func NormLogPdf(x float64, loc float64, scale float64) float64 { + return math.Log((math.Pow(math.E, -(math.Pow(x-loc, 2))/(2*math.Pow(scale, 2)))) / (scale * math.Sqrt(2*math.Pi))) +} + +// NormCdf is the cumulative distribution function. +func NormCdf(x float64, loc float64, scale float64) float64 { + return 0.5 * (1 + math.Erf((x-loc)/(scale*math.Sqrt(2)))) +} + +// NormLogCdf is the log of the cumulative distribution function. +func NormLogCdf(x float64, loc float64, scale float64) float64 { + return math.Log(0.5 * (1 + math.Erf((x-loc)/(scale*math.Sqrt(2))))) +} + +// NormSf is the survival function (also defined as 1 - cdf, but sf is sometimes more accurate). +func NormSf(x float64, loc float64, scale float64) float64 { + return 1 - 0.5*(1+math.Erf((x-loc)/(scale*math.Sqrt(2)))) +} + +// NormLogSf is the log of the survival function. +func NormLogSf(x float64, loc float64, scale float64) float64 { + return math.Log(1 - 0.5*(1+math.Erf((x-loc)/(scale*math.Sqrt(2))))) +} + +// NormPpf is the point percentile function. +// This is based on Peter John Acklam's inverse normal CDF. +// algorithm: http://home.online.no/~pjacklam/notes/invnorm/ (no longer visible). +// For more information please visit: https://stackedboxes.org/2017/05/01/acklams-normal-quantile-function/ +func NormPpf(p float64, loc float64, scale float64) (x float64) { + const ( + a1 = -3.969683028665376e+01 + a2 = 2.209460984245205e+02 + a3 = -2.759285104469687e+02 + a4 = 1.383577518672690e+02 + a5 = -3.066479806614716e+01 + a6 = 2.506628277459239e+00 + + b1 = -5.447609879822406e+01 + b2 = 1.615858368580409e+02 + b3 = -1.556989798598866e+02 + b4 = 6.680131188771972e+01 + b5 = -1.328068155288572e+01 + + c1 = -7.784894002430293e-03 + c2 = -3.223964580411365e-01 + c3 = -2.400758277161838e+00 + c4 = -2.549732539343734e+00 + c5 = 4.374664141464968e+00 + c6 = 2.938163982698783e+00 + + d1 = 7.784695709041462e-03 + d2 = 3.224671290700398e-01 + d3 = 2.445134137142996e+00 + d4 = 3.754408661907416e+00 + + plow = 0.02425 + phigh = 1 - plow + ) + + if p < 0 || p > 1 { + return math.NaN() + } else if p == 0 { + return -math.Inf(0) + } else if p == 1 { + return math.Inf(0) + } + + if p < plow { + q := math.Sqrt(-2 * math.Log(p)) + x = (((((c1*q+c2)*q+c3)*q+c4)*q+c5)*q + c6) / + ((((d1*q+d2)*q+d3)*q+d4)*q + 1) + } else if phigh < p { + q := math.Sqrt(-2 * math.Log(1-p)) + x = -(((((c1*q+c2)*q+c3)*q+c4)*q+c5)*q + c6) / + ((((d1*q+d2)*q+d3)*q+d4)*q + 1) + } else { + q := p - 0.5 + r := q * q + x = (((((a1*r+a2)*r+a3)*r+a4)*r+a5)*r + a6) * q / + (((((b1*r+b2)*r+b3)*r+b4)*r+b5)*r + 1) + } + + e := 0.5*math.Erfc(-x/math.Sqrt2) - p + u := e * math.Sqrt(2*math.Pi) * math.Exp(x*x/2) + x = x - u/(1+x*u/2) + + return x*scale + loc +} + +// NormIsf is the inverse survival function (inverse of sf). 
+func NormIsf(p float64, loc float64, scale float64) (x float64) { + if -NormPpf(p, loc, scale) == 0 { + return 0 + } + return -NormPpf(p, loc, scale) +} + +// NormMoment approximates the non-central (raw) moment of order n. +// For more information please visit: https://math.stackexchange.com/questions/1945448/methods-for-finding-raw-moments-of-the-normal-distribution +func NormMoment(n int, loc float64, scale float64) float64 { + toReturn := 0.0 + for i := 0; i < n+1; i++ { + if (n-i)%2 == 0 { + toReturn += float64(Ncr(n, i)) * (math.Pow(loc, float64(i))) * (math.Pow(scale, float64(n-i))) * + (float64(factorial(n-i)) / ((math.Pow(2.0, float64((n-i)/2))) * + float64(factorial((n-i)/2)))) + } + } + return toReturn +} + +// NormStats returns the mean, variance, skew, and/or kurtosis. +// Mean(‘m’), variance(‘v’), skew(‘s’), and/or kurtosis(‘k’). +// Takes string containing any of 'mvsk'. +// Returns array of m v s k in that order. +func NormStats(loc float64, scale float64, moments string) []float64 { + var toReturn []float64 + if strings.ContainsAny(moments, "m") { + toReturn = append(toReturn, loc) + } + if strings.ContainsAny(moments, "v") { + toReturn = append(toReturn, math.Pow(scale, 2)) + } + if strings.ContainsAny(moments, "s") { + toReturn = append(toReturn, 0.0) + } + if strings.ContainsAny(moments, "k") { + toReturn = append(toReturn, 0.0) + } + return toReturn +} + +// NormEntropy is the differential entropy of the RV. +func NormEntropy(loc float64, scale float64) float64 { + return math.Log(scale * math.Sqrt(2*math.Pi*math.E)) +} + +// NormFit returns the maximum likelihood estimators for the Normal Distribution. +// Takes array of float64 values. +// Returns array of Mean followed by Standard Deviation. +func NormFit(data []float64) [2]float64 { + sum := 0.00 + for i := 0; i < len(data); i++ { + sum += data[i] + } + mean := sum / float64(len(data)) + stdNumerator := 0.00 + for i := 0; i < len(data); i++ { + stdNumerator += math.Pow(data[i]-mean, 2) + } + return [2]float64{mean, math.Sqrt((stdNumerator) / (float64(len(data))))} +} + +// NormMedian is the median of the distribution. +func NormMedian(loc float64, scale float64) float64 { + return loc +} + +// NormMean is the mean/expected value of the distribution. +func NormMean(loc float64, scale float64) float64 { + return loc +} + +// NormVar is the variance of the distribution. +func NormVar(loc float64, scale float64) float64 { + return math.Pow(scale, 2) +} + +// NormStd is the standard deviation of the distribution. +func NormStd(loc float64, scale float64) float64 { + return scale +} + +// NormInterval finds endpoints of the range that contains alpha percent of the distribution. +func NormInterval(alpha float64, loc float64, scale float64) [2]float64 { + q1 := (1.0 - alpha) / 2 + q2 := (1.0 + alpha) / 2 + a := NormPpf(q1, loc, scale) + b := NormPpf(q2, loc, scale) + return [2]float64{a, b} +} + +// factorial is the naive factorial algorithm. +func factorial(x int) int { + if x == 0 { + return 1 + } + return x * factorial(x-1) +} + +// Ncr is an N choose R algorithm. +// Aaron Cannon's algorithm. 
+func Ncr(n, r int) int { + if n <= 1 || r == 0 || n == r { + return 1 + } + if newR := n - r; newR < r { + r = newR + } + if r == 1 { + return n + } + ret := int(n - r + 1) + for i, j := ret+1, int(2); j <= r; i, j = i+1, j+1 { + ret = ret * i / j + } + return ret +} diff --git a/vendor/github.com/montanaflynn/stats/outlier.go b/vendor/github.com/montanaflynn/stats/outlier.go new file mode 100644 index 00000000..7c9795bd --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/outlier.go @@ -0,0 +1,44 @@ +package stats + +// Outliers holds mild and extreme outliers found in data +type Outliers struct { + Mild Float64Data + Extreme Float64Data +} + +// QuartileOutliers finds the mild and extreme outliers +func QuartileOutliers(input Float64Data) (Outliers, error) { + if input.Len() == 0 { + return Outliers{}, EmptyInputErr + } + + // Start by sorting a copy of the slice + copy := sortedCopy(input) + + // Calculate the quartiles and interquartile range + qs, _ := Quartile(copy) + iqr, _ := InterQuartileRange(copy) + + // Calculate the lower and upper inner and outer fences + lif := qs.Q1 - (1.5 * iqr) + uif := qs.Q3 + (1.5 * iqr) + lof := qs.Q1 - (3 * iqr) + uof := qs.Q3 + (3 * iqr) + + // Find the data points that are outside of the + // inner and outer fences and add them to mild + // and extreme outlier slices + var mild Float64Data + var extreme Float64Data + for _, v := range copy { + + if v < lof || v > uof { + extreme = append(extreme, v) + } else if v < lif || v > uif { + mild = append(mild, v) + } + } + + // Wrap them into our struct + return Outliers{mild, extreme}, nil +} diff --git a/vendor/github.com/montanaflynn/stats/percentile.go b/vendor/github.com/montanaflynn/stats/percentile.go new file mode 100644 index 00000000..f5641783 --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/percentile.go @@ -0,0 +1,86 @@ +package stats + +import ( + "math" +) + +// Percentile finds the relative standing in a slice of floats +func Percentile(input Float64Data, percent float64) (percentile float64, err error) { + length := input.Len() + if length == 0 { + return math.NaN(), EmptyInputErr + } + + if length == 1 { + return input[0], nil + } + + if percent <= 0 || percent > 100 { + return math.NaN(), BoundsErr + } + + // Start by sorting a copy of the slice + c := sortedCopy(input) + + // Multiply percent by length of input + index := (percent / 100) * float64(len(c)) + + // Check if the index is a whole number + if index == float64(int64(index)) { + + // Convert float to int + i := int(index) + + // Find the value at the index + percentile = c[i-1] + + } else if index > 1 { + + // Convert float to int via truncation + i := int(index) + + // Find the average of the index and following values + percentile, _ = Mean(Float64Data{c[i-1], c[i]}) + + } else { + return math.NaN(), BoundsErr + } + + return percentile, nil + +} + +// PercentileNearestRank finds the relative standing in a slice of floats using the Nearest Rank method +func PercentileNearestRank(input Float64Data, percent float64) (percentile float64, err error) { + + // Find the length of items in the slice + il := input.Len() + + // Return an error for empty slices + if il == 0 { + return math.NaN(), EmptyInputErr + } + + // Return error for less than 0 or greater than 100 percentages + if percent < 0 || percent > 100 { + return math.NaN(), BoundsErr + } + + // Start by sorting a copy of the slice + c := sortedCopy(input) + + // Return the last item + if percent == 100.0 { + return c[il-1], nil + } + + // Find ordinal ranking + 
or := int(math.Ceil(float64(il) * percent / 100)) + + // Return the item that is in the place of the ordinal rank + if or == 0 { + return c[0], nil + } + return c[or-1], nil + +} diff --git a/vendor/github.com/montanaflynn/stats/quartile.go b/vendor/github.com/montanaflynn/stats/quartile.go new file mode 100644 index 00000000..40bbf6e5 --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/quartile.go @@ -0,0 +1,74 @@ +package stats + +import "math" + +// Quartiles holds the three quartile points +type Quartiles struct { + Q1 float64 + Q2 float64 + Q3 float64 +} + +// Quartile returns the three quartile points from a slice of data +func Quartile(input Float64Data) (Quartiles, error) { + + il := input.Len() + if il == 0 { + return Quartiles{}, EmptyInputErr + } + + // Start by sorting a copy of the slice + copy := sortedCopy(input) + + // Find the cutoff places depending on whether + // the input slice length is even or odd + var c1 int + var c2 int + if il%2 == 0 { + c1 = il / 2 + c2 = il / 2 + } else { + c1 = (il - 1) / 2 + c2 = c1 + 1 + } + + // Find the Medians with the cutoff points + Q1, _ := Median(copy[:c1]) + Q2, _ := Median(copy) + Q3, _ := Median(copy[c2:]) + + return Quartiles{Q1, Q2, Q3}, nil + +} + +// InterQuartileRange finds the range between Q1 and Q3 +func InterQuartileRange(input Float64Data) (float64, error) { + if input.Len() == 0 { + return math.NaN(), EmptyInputErr + } + qs, _ := Quartile(input) + iqr := qs.Q3 - qs.Q1 + return iqr, nil +} + +// Midhinge finds the average of the first and third quartiles +func Midhinge(input Float64Data) (float64, error) { + if input.Len() == 0 { + return math.NaN(), EmptyInputErr + } + qs, _ := Quartile(input) + mh := (qs.Q1 + qs.Q3) / 2 + return mh, nil +} + +// Trimean finds the average of the median and the midhinge +func Trimean(input Float64Data) (float64, error) { + if input.Len() == 0 { + return math.NaN(), EmptyInputErr + } + + c := sortedCopy(input) + q, _ := Quartile(c) + + return (q.Q1 + (q.Q2 * 2) + q.Q3) / 4, nil +} diff --git a/vendor/github.com/montanaflynn/stats/ranksum.go b/vendor/github.com/montanaflynn/stats/ranksum.go new file mode 100644 index 00000000..fc424ef4 --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/ranksum.go @@ -0,0 +1,183 @@ +package stats + +// import "math" +// +// // WilcoxonRankSum tests the null hypothesis that two sets +// // of data are drawn from the same distribution. It does +// // not handle ties between measurements in x and y. +// // +// // Parameters: +// // data1 Float64Data: First set of data points. +// // data2 Float64Data: Second set of data points. +// // Length of both data samples must be equal. +// // +// // Return: +// // statistic float64: The test statistic under the +// // large-sample approximation that the +// // rank sum statistic is normally distributed. +// // pvalue float64: The two-sided p-value of the test +// // err error: Any error from the input data parameters +// // +// // https://en.wikipedia.org/wiki/Wilcoxon_rank-sum_test +// func WilcoxonRankSum(data1, data2 Float64Data) (float64, float64, error) { +// +// l1 := data1.Len() +// l2 := data2.Len() +// +// if l1 == 0 || l2 == 0 { +// return math.NaN(), math.NaN(), EmptyInputErr +// } +// +// if l1 != l2 { +// return math.NaN(), math.NaN(), SizeErr +// } +// +// alldata := Float64Data{} +// alldata = append(alldata, data1...) +// alldata = append(alldata, data2...)
+// +// // ranked := +// +// return 0.0, 0.0, nil +// } +// +// // x, y = map(np.asarray, (x, y)) +// // n1 = len(x) +// // n2 = len(y) +// // alldata = np.concatenate((x, y)) +// // ranked = rankdata(alldata) +// // x = ranked[:n1] +// // s = np.sum(x, axis=0) +// // expected = n1 * (n1+n2+1) / 2.0 +// // z = (s - expected) / np.sqrt(n1*n2*(n1+n2+1)/12.0) +// // prob = 2 * distributions.norm.sf(abs(z)) +// // +// // return RanksumsResult(z, prob) +// +// // def rankdata(a, method='average'): +// // """ +// // Assign ranks to data, dealing with ties appropriately. +// // Ranks begin at 1. The `method` argument controls how ranks are assigned +// // to equal values. See [1]_ for further discussion of ranking methods. +// // Parameters +// // ---------- +// // a : array_like +// // The array of values to be ranked. The array is first flattened. +// // method : str, optional +// // The method used to assign ranks to tied elements. +// // The options are 'average', 'min', 'max', 'dense' and 'ordinal'. +// // 'average': +// // The average of the ranks that would have been assigned to +// // all the tied values is assigned to each value. +// // 'min': +// // The minimum of the ranks that would have been assigned to all +// // the tied values is assigned to each value. (This is also +// // referred to as "competition" ranking.) +// // 'max': +// // The maximum of the ranks that would have been assigned to all +// // the tied values is assigned to each value. +// // 'dense': +// // Like 'min', but the rank of the next highest element is assigned +// // the rank immediately after those assigned to the tied elements. +// // 'ordinal': +// // All values are given a distinct rank, corresponding to the order +// // that the values occur in `a`. +// // The default is 'average'. +// // Returns +// // ------- +// // ranks : ndarray +// // An array of length equal to the size of `a`, containing rank +// // scores. +// // References +// // ---------- +// // .. [1] "Ranking", https://en.wikipedia.org/wiki/Ranking +// // Examples +// // -------- +// // >>> from scipy.stats import rankdata +// // >>> rankdata([0, 2, 3, 2]) +// // array([ 1. , 2.5, 4. 
, 2.5]) +// // """ +// // +// // arr = np.ravel(np.asarray(a)) +// // algo = 'quicksort' +// // sorter = np.argsort(arr, kind=algo) +// // +// // inv = np.empty(sorter.size, dtype=np.intp) +// // inv[sorter] = np.arange(sorter.size, dtype=np.intp) +// // +// // +// // arr = arr[sorter] +// // obs = np.r_[True, arr[1:] != arr[:-1]] +// // dense = obs.cumsum()[inv] +// // +// // +// // # cumulative counts of each unique value +// // count = np.r_[np.nonzero(obs)[0], len(obs)] +// // +// // # average method +// // return .5 * (count[dense] + count[dense - 1] + 1) +// +// type rankable interface { +// Len() int +// RankEqual(int, int) bool +// } +// +// func StandardRank(d rankable) []float64 { +// r := make([]float64, d.Len()) +// var k int +// for i := range r { +// if i == 0 || !d.RankEqual(i, i-1) { +// k = i + 1 +// } +// r[i] = float64(k) +// } +// return r +// } +// +// func ModifiedRank(d rankable) []float64 { +// r := make([]float64, d.Len()) +// for i := range r { +// k := i + 1 +// for j := i + 1; j < len(r) && d.RankEqual(i, j); j++ { +// k = j + 1 +// } +// r[i] = float64(k) +// } +// return r +// } +// +// func DenseRank(d rankable) []float64 { +// r := make([]float64, d.Len()) +// var k int +// for i := range r { +// if i == 0 || !d.RankEqual(i, i-1) { +// k++ +// } +// r[i] = float64(k) +// } +// return r +// } +// +// func OrdinalRank(d rankable) []float64 { +// r := make([]float64, d.Len()) +// for i := range r { +// r[i] = float64(i + 1) +// } +// return r +// } +// +// func FractionalRank(d rankable) []float64 { +// r := make([]float64, d.Len()) +// for i := 0; i < len(r); { +// var j int +// f := float64(i + 1) +// for j = i + 1; j < len(r) && d.RankEqual(i, j); j++ { +// f += float64(j + 1) +// } +// f /= float64(j - i) +// for ; i < j; i++ { +// r[i] = f +// } +// } +// return r +// } diff --git a/vendor/github.com/montanaflynn/stats/regression.go b/vendor/github.com/montanaflynn/stats/regression.go new file mode 100644 index 00000000..401d9512 --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/regression.go @@ -0,0 +1,113 @@ +package stats + +import "math" + +// Series is a container for a series of data +type Series []Coordinate + +// Coordinate holds the data in a series +type Coordinate struct { + X, Y float64 +} + +// LinearRegression finds the least squares linear regression on data series +func LinearRegression(s Series) (regressions Series, err error) { + + if len(s) == 0 { + return nil, EmptyInputErr + } + + // Placeholder for the math to be done + var sum [5]float64 + + // Loop over data keeping index in place + i := 0 + for ; i < len(s); i++ { + sum[0] += s[i].X + sum[1] += s[i].Y + sum[2] += s[i].X * s[i].X + sum[3] += s[i].X * s[i].Y + sum[4] += s[i].Y * s[i].Y + } + + // Find gradient and intercept + f := float64(i) + gradient := (f*sum[3] - sum[0]*sum[1]) / (f*sum[2] - sum[0]*sum[0]) + intercept := (sum[1] / f) - (gradient * sum[0] / f) + + // Create the new regression series + for j := 0; j < len(s); j++ { + regressions = append(regressions, Coordinate{ + X: s[j].X, + Y: s[j].X*gradient + intercept, + }) + } + + return regressions, nil +} + +// ExponentialRegression returns an exponential regression on data series +func ExponentialRegression(s Series) (regressions Series, err error) { + + if len(s) == 0 { + return nil, EmptyInputErr + } + + var sum [6]float64 + + for i := 0; i < len(s); i++ { + if s[i].Y < 0 { + return nil, YCoordErr + } + sum[0] += s[i].X + sum[1] += s[i].Y + sum[2] += s[i].X * s[i].X * s[i].Y + sum[3] += s[i].Y * math.Log(s[i].Y) 
+ sum[4] += s[i].X * s[i].Y * math.Log(s[i].Y) + sum[5] += s[i].X * s[i].Y + } + + denominator := (sum[1]*sum[2] - sum[5]*sum[5]) + a := math.Pow(math.E, (sum[2]*sum[3]-sum[5]*sum[4])/denominator) + b := (sum[1]*sum[4] - sum[5]*sum[3]) / denominator + + for j := 0; j < len(s); j++ { + regressions = append(regressions, Coordinate{ + X: s[j].X, + Y: a * math.Exp(b*s[j].X), + }) + } + + return regressions, nil +} + +// LogarithmicRegression returns an logarithmic regression on data series +func LogarithmicRegression(s Series) (regressions Series, err error) { + + if len(s) == 0 { + return nil, EmptyInputErr + } + + var sum [4]float64 + + i := 0 + for ; i < len(s); i++ { + sum[0] += math.Log(s[i].X) + sum[1] += s[i].Y * math.Log(s[i].X) + sum[2] += s[i].Y + sum[3] += math.Pow(math.Log(s[i].X), 2) + } + + f := float64(i) + a := (f*sum[1] - sum[2]*sum[0]) / (f*sum[3] - sum[0]*sum[0]) + b := (sum[2] - a*sum[0]) / f + + for j := 0; j < len(s); j++ { + regressions = append(regressions, Coordinate{ + X: s[j].X, + Y: b + a*math.Log(s[j].X), + }) + } + + return regressions, nil +} diff --git a/vendor/github.com/montanaflynn/stats/round.go b/vendor/github.com/montanaflynn/stats/round.go new file mode 100644 index 00000000..b66779c9 --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/round.go @@ -0,0 +1,38 @@ +package stats + +import "math" + +// Round a float to a specific decimal place or precision +func Round(input float64, places int) (rounded float64, err error) { + + // If the float is not a number + if math.IsNaN(input) { + return math.NaN(), NaNErr + } + + // Find out the actual sign and correct the input for later + sign := 1.0 + if input < 0 { + sign = -1 + input *= -1 + } + + // Use the places arg to get the amount of precision wanted + precision := math.Pow(10, float64(places)) + + // Find the decimal place we are looking to round + digit := input * precision + + // Get the actual decimal number as a fraction to be compared + _, decimal := math.Modf(digit) + + // If the decimal is less than .5 we round down otherwise up + if decimal >= 0.5 { + rounded = math.Ceil(digit) + } else { + rounded = math.Floor(digit) + } + + // Finally we do the math to actually create a rounded number + return rounded / precision * sign, nil +} diff --git a/vendor/github.com/montanaflynn/stats/sample.go b/vendor/github.com/montanaflynn/stats/sample.go new file mode 100644 index 00000000..40166af6 --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/sample.go @@ -0,0 +1,76 @@ +package stats + +import ( + "math/rand" + "sort" +) + +// Sample returns sample from input with replacement or without +func Sample(input Float64Data, takenum int, replacement bool) ([]float64, error) { + + if input.Len() == 0 { + return nil, EmptyInputErr + } + + length := input.Len() + if replacement { + + result := Float64Data{} + rand.Seed(unixnano()) + + // In every step, randomly take the num for + for i := 0; i < takenum; i++ { + idx := rand.Intn(length) + result = append(result, input[idx]) + } + + return result, nil + + } else if !replacement && takenum <= length { + + rand.Seed(unixnano()) + + // Get permutation of number of indexies + perm := rand.Perm(length) + result := Float64Data{} + + // Get element of input by permutated index + for _, idx := range perm[0:takenum] { + result = append(result, input[idx]) + } + + return result, nil + + } + + return nil, BoundsErr +} + +// StableSample like stable sort, it returns samples from input while keeps the order of original data. 
+func StableSample(input Float64Data, takenum int) ([]float64, error) { + if input.Len() == 0 { + return nil, EmptyInputErr + } + + length := input.Len() + + if takenum <= length { + + rand.Seed(unixnano()) + + perm := rand.Perm(length) + perm = perm[0:takenum] + // Sort perm before applying + sort.Ints(perm) + result := Float64Data{} + + for _, idx := range perm { + result = append(result, input[idx]) + } + + return result, nil + + } + + return nil, BoundsErr +} diff --git a/vendor/github.com/montanaflynn/stats/sigmoid.go b/vendor/github.com/montanaflynn/stats/sigmoid.go new file mode 100644 index 00000000..5f2559d8 --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/sigmoid.go @@ -0,0 +1,18 @@ +package stats + +import "math" + +// Sigmoid returns the input values in the range of -1 to 1 +// along the sigmoid or s-shaped curve, commonly used in +// machine learning while training neural networks as an +// activation function. +func Sigmoid(input Float64Data) ([]float64, error) { + if input.Len() == 0 { + return Float64Data{}, EmptyInput + } + s := make([]float64, len(input)) + for i, v := range input { + s[i] = 1 / (1 + math.Exp(-v)) + } + return s, nil +} diff --git a/vendor/github.com/montanaflynn/stats/softmax.go b/vendor/github.com/montanaflynn/stats/softmax.go new file mode 100644 index 00000000..85072642 --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/softmax.go @@ -0,0 +1,25 @@ +package stats + +import "math" + +// SoftMax returns the input values in the range of 0 to 1 +// with sum of all the probabilities being equal to one. It +// is commonly used in machine learning neural networks. +func SoftMax(input Float64Data) ([]float64, error) { + if input.Len() == 0 { + return Float64Data{}, EmptyInput + } + + s := 0.0 + c, _ := Max(input) + for _, e := range input { + s += math.Exp(e - c) + } + + sm := make([]float64, len(input)) + for i, v := range input { + sm[i] = math.Exp(v-c) / s + } + + return sm, nil +} diff --git a/vendor/github.com/montanaflynn/stats/sum.go b/vendor/github.com/montanaflynn/stats/sum.go new file mode 100644 index 00000000..15b611d1 --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/sum.go @@ -0,0 +1,18 @@ +package stats + +import "math" + +// Sum adds all the numbers of a slice together +func Sum(input Float64Data) (sum float64, err error) { + + if input.Len() == 0 { + return math.NaN(), EmptyInputErr + } + + // Add em up + for _, n := range input { + sum += n + } + + return sum, nil +} diff --git a/vendor/github.com/montanaflynn/stats/util.go b/vendor/github.com/montanaflynn/stats/util.go new file mode 100644 index 00000000..88199760 --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/util.go @@ -0,0 +1,43 @@ +package stats + +import ( + "sort" + "time" +) + +// float64ToInt rounds a float64 to an int +func float64ToInt(input float64) (output int) { + r, _ := Round(input, 0) + return int(r) +} + +// unixnano returns nanoseconds from UTC epoch +func unixnano() int64 { + return time.Now().UTC().UnixNano() +} + +// copyslice copies a slice of float64s +func copyslice(input Float64Data) Float64Data { + s := make(Float64Data, input.Len()) + copy(s, input) + return s +} + +// sortedCopy returns a sorted copy of float64s +func sortedCopy(input Float64Data) (copy Float64Data) { + copy = copyslice(input) + sort.Float64s(copy) + return +} + +// sortedCopyDif returns a sorted copy of float64s +// only if the original data isn't sorted. +// Only use this if returned slice won't be manipulated! 
+func sortedCopyDif(input Float64Data) (copy Float64Data) {
+	if sort.Float64sAreSorted(input) {
+		return input
+	}
+	copy = copyslice(input)
+	sort.Float64s(copy)
+	return
+}
diff --git a/vendor/github.com/montanaflynn/stats/variance.go b/vendor/github.com/montanaflynn/stats/variance.go
new file mode 100644
index 00000000..a6445690
--- /dev/null
+++ b/vendor/github.com/montanaflynn/stats/variance.go
@@ -0,0 +1,105 @@
+package stats
+
+import "math"
+
+// _variance finds the variance for both population and sample data
+func _variance(input Float64Data, sample int) (variance float64, err error) {
+
+	if input.Len() == 0 {
+		return math.NaN(), EmptyInputErr
+	}
+
+	// Sum the square of the mean subtracted from each number
+	m, _ := Mean(input)
+
+	for _, n := range input {
+		variance += (n - m) * (n - m)
+	}
+
+	// When getting the mean of the squared differences
+	// "sample" will allow us to know if it's a sample
+	// or population and whether to subtract by one or not
+	return variance / float64((input.Len() - (1 * sample))), nil
+}
+
+// Variance finds the amount of variation in the dataset
+func Variance(input Float64Data) (sdev float64, err error) {
+	return PopulationVariance(input)
+}
+
+// PopulationVariance finds the amount of variance within a population
+func PopulationVariance(input Float64Data) (pvar float64, err error) {
+
+	v, err := _variance(input, 0)
+	if err != nil {
+		return math.NaN(), err
+	}
+
+	return v, nil
+}
+
+// SampleVariance finds the amount of variance within a sample
+func SampleVariance(input Float64Data) (svar float64, err error) {
+
+	v, err := _variance(input, 1)
+	if err != nil {
+		return math.NaN(), err
+	}
+
+	return v, nil
+}
+
+// Covariance is a measure of how much two sets of data change
+func Covariance(data1, data2 Float64Data) (float64, error) {
+
+	l1 := data1.Len()
+	l2 := data2.Len()
+
+	if l1 == 0 || l2 == 0 {
+		return math.NaN(), EmptyInputErr
+	}
+
+	if l1 != l2 {
+		return math.NaN(), SizeErr
+	}
+
+	m1, _ := Mean(data1)
+	m2, _ := Mean(data2)
+
+	// Calculate sum of squares
+	var ss float64
+	for i := 0; i < l1; i++ {
+		delta1 := (data1.Get(i) - m1)
+		delta2 := (data2.Get(i) - m2)
+		ss += (delta1*delta2 - ss) / float64(i+1)
+	}
+
+	return ss * float64(l1) / float64(l1-1), nil
+}
+
+// CovariancePopulation computes covariance for entire population between two variables.
+func CovariancePopulation(data1, data2 Float64Data) (float64, error) {
+
+	l1 := data1.Len()
+	l2 := data2.Len()
+
+	if l1 == 0 || l2 == 0 {
+		return math.NaN(), EmptyInputErr
+	}
+
+	if l1 != l2 {
+		return math.NaN(), SizeErr
+	}
+
+	m1, _ := Mean(data1)
+	m2, _ := Mean(data2)
+
+	var s float64
+	for i := 0; i < l1; i++ {
+		delta1 := (data1.Get(i) - m1)
+		delta2 := (data2.Get(i) - m2)
+		s += delta1 * delta2
+	}
+
+	return s / float64(l1), nil
+}
diff --git a/vendor/github.com/nsqio/go-nsq/AUTHORS b/vendor/github.com/nsqio/go-nsq/AUTHORS
new file mode 100644
index 00000000..770f2792
--- /dev/null
+++ b/vendor/github.com/nsqio/go-nsq/AUTHORS
@@ -0,0 +1,15 @@
+# For a complete listing, see https://github.com/nsqio/go-nsq/graphs/contributors
+
+# Original Authors
+
+Matt Reiferson
+Jehiah Czebotar
+
+# Maintainers
+
+Pierce Lopez
+
+# Disclaimer
+
+Matt Reiferson's contributions to this project are being made solely in a personal capacity
+and do not convey any rights to any intellectual property of any third parties.
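For orientation, here is a minimal sketch of how the montanaflynn/stats helpers vendored above compose. The data values are purely illustrative (not from the original change), and the commented results follow from the quartile, fence, and variance definitions shown in the files above:

```go
package main

import (
	"fmt"

	"github.com/montanaflynn/stats"
)

func main() {
	// Illustrative sample; 42 is a deliberate extreme point.
	data := stats.Float64Data{2, 4, 4, 4, 5, 5, 7, 9, 42}

	qs, _ := stats.Quartile(data)            // Q1=4, Q2=5, Q3=8 per the cutoffs above
	iqr, _ := stats.InterQuartileRange(data)  // Q3 - Q1 = 4
	out, _ := stats.QuartileOutliers(data)    // 42 > Q3 + 3*IQR, so it lands in out.Extreme
	pv, _ := stats.PopulationVariance(data)   // _variance with sample == 0

	fmt.Println(qs.Q1, qs.Q2, qs.Q3, iqr, out.Extreme, pv)
}
```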
diff --git a/vendor/github.com/nsqio/go-nsq/ChangeLog.md b/vendor/github.com/nsqio/go-nsq/ChangeLog.md new file mode 100644 index 00000000..bb1fadc5 --- /dev/null +++ b/vendor/github.com/nsqio/go-nsq/ChangeLog.md @@ -0,0 +1,284 @@ +## go-nsq Change Log + +### 1.1.0 - 2021-10-25 + + * #275/#281 - support separate Logger for each log level (thanks @crazyweave) + * #282 - consumer: reduce duplicate RDY (ready) count updates (thanks @andyxning) + * #283 - remove redundant Config initialized check (thanks @SwanSpouse) + * #313 - add Authorization header to lookup queries + * #321 - consumer: fix panic with some invalid lookupd http addresses (thanks @martin-sucha) + * #317 - producer: connect() code-style improvement (thanks @martin-sucha) + * #330 - fix random backoff jitter on 32-bit architectures + * #333 - consumer: re-use http client with keepalives for lookupd requests (thanks @JieTrancender) + * #336 - producer: shutdown logging prefix consistent with other logging (thanks @karalabe) + * #294 - docs: fix producer example (thanks @nikitabuyevich) + * #307 - docs: add exit signal handling to consumer example + * #324 - docs: fix Consumer.SetLogger() description (thanks @gabriel-vasile) + * #297 - add AUTHORS file + * #329/#330 - switch to GitHub Actions for CI + +### 1.0.8 - 2019-12-24 + +Thanks to @judwhite, @vitaliytv, and @HaraldNordgren for contributing to testing and dependency management improvements + + * #248 - support go modules + * #249 - consumer: update RDY when setting MaxInFlight to 0 + * #267 - check response message size is positive (thanks @andyxning) + * #271 - godoc for publisher and consumer (thanks @skateinmars) + * #270 - set log level (thanks @YongHaoWu) + * #255 - go vet tls.Config copying (thanks @iaburton) + +### 1.0.7 - 2017-08-04 + +**Upgrading from 1.0.6**: There are no backward incompatible changes. + + * #97/#209 - consumer: retry nsqlookupd queries + * #179/#208 - consumer: redistribute RDY when connections are active + * #184/#201 - producer: fix misleading Stop() EOF (thanks @mengskysama) + * #203 - switch to golang/snappy (addressing potential snappy related deadlocks) + * #202 - consumer: fix backoff logging + +### 1.0.6 - 2016-06-04 + +**Upgrading from 1.0.5**: There are no backward incompatible changes. + + * #175 - consumer: reduce garbage generation in DecodeMessage (thanks @Dieterbe) + * #162 - producer: support `DeferredPublish` (thanks @DanielHeckrath) + +### 1.0.5 - 2015-09-19 + +**Upgrading from 1.0.4**: There are no backward incompatible changes. + + * #156 - consumer: prevent data race on RNG + * #155 - config: support `flag.Value` interface + * #147/#150 - consumer: fix application of `max_backoff_duration` (thanks @judwhite) + * #138 - fix lint, vet, fmt issues + * #137 - remove `go-simplejson` dependency + +### 1.0.4 - 2015-04-07 + +**Upgrading from 1.0.3**: There are no backward incompatible changes. 
+ + * #133 - fix `ErrNotConnected` race during `Producer` connection (thanks @jeddenlea) + * #132 - fix `RDY` redistribution after backoff with no connections + * #128 - fix backoff stall when using `RequeueWithoutBackoff` + * #127 - fix handling of connection closing when resuming after backoff (thanks @jnewmano) + * #126 - allow `BackoffStrategy` to be set via flag (thanks @twmb) + * #125 - add pluggable consumer `BackoffStrategy`; add full-jitter strategy (thanks @hden) + * #124 - add `DialTimeout` and `LocalAddr` config (thanks @yashkin) + * #119 - add `Producer.Ping()` method (thanks @zulily) + * #122 - refactor log level string handling + * #120 - fix `Message` data races on `responded` + * #114 - fix lookupd jitter having no effect (thanks @judwhite) + +### 1.0.3 - 2015-02-07 + +**Upgrading from 1.0.2**: There are no backward incompatible changes. + + * #104 - fix reconnect address bug (thanks @ryanslade) + * #106 - fix backoff reconnect deadlock (thanks @ryanslade) + * #107 - fix out-of-bounds error when removing nsqlookupd addresses (thanks @andreas) + * #108 - fix potential logger race conditions (thanks @judwhite) + * #111 - fix resolved address error in reconnect loop (thanks @twmb) + +### 1.0.2 - 2015-01-21 + +**Upgrading from 1.0.1**: There are no backward incompatible changes. + + * #102 - TLS min/max config defaults (thanks @twmb) + * #99 - fix `Consumer.Stop()` race and `Producer.Stop()` deadlock (thanks @tylertreat) + * #92 - expose `Message.NSQDAddress` + * #95 - cleanup panic during `Consumer.Stop()` if handlers are deadlocked + * #98 - add `tls-min-version` option (thanks @twmb) + * #93 - expose a way to get `Consumer` runtime stats (thanks @dcarney) + * #94 - allow `#ephemeral` topic names (thanks @jamesgroat) + +### 1.0.1 - 2014-11-09 + +**Upgrading from 1.0.0**: There are no backward incompatible changes functionally, however this +release no longer compiles with Go `1.0.x`. + + * #89 - don't spam connection teardown cleanup messages + * #91 - add consumer `DisconnectFrom*` + * #87 - allow `heartbeat_interval` and `output_buffer_timeout` to be disabled + * #86 - pluggable `nsqlookupd` behaviors + * #83 - send `RDY` before `FIN`/`REQ` (forwards compatibility with nsqio/nsq#404) + * #82 - fix panic when conn isn't assigned + * #75/#76 - minor config related bug fixes + * #75/#77/#78 - add `tls-cert` and `tls-key` config options + +### 1.0.0 - 2014-08-11 + +**Upgrading from 0.3.7**: The public API was significantly refactored and is not backwards +compatible, please read [UPGRADING](UPGRADING.md). + + * #58 - support `IDENTIFY` `msg_timeout` + * #54 - per-connection TLS config and set `ServerName` + * #49 - add common connect helpers + * #43/#63 - more flexible `nsqlookupd` URL specification + * #35 - `AUTH` support + * #41/#62 - use package private RNG + * #36 - support 64 character topic/channel names + * #30/#38/#39/#42/#45/#46/#48/#51/#52/#65/#70 - refactor public API (see [UPGRADING](UPGRADING.md)) + +### 0.3.7 - 2014-05-25 + +**Upgrading from 0.3.6**: There are no backward incompatible changes. **THIS IS THE LAST STABLE +RELEASE PROVIDING THIS API**. Future releases will be based on the api in #30 and **will not be +backwards compatible!** + +This is a bug fix release relating to the refactoring done in `0.3.6`. + + * #32 - fix potential panic for race condition when # conns == 0 + * #33/#34 - more granular connection locking + +### 0.3.6 - 2014-04-29 + +**Upgrading from 0.3.5**: There are no backward incompatible changes. 
+ +This release includes a significant internal refactoring, designed +to better encapsulate responsibility, see #19. + +Specifically: + + * make `Conn` public + * move transport responsibilities into `Conn` from `Reader`/`Writer` + * supply callbacks for hooking into `Conn` events + +As part of the refactoring, a few additional clean exit related +issues were resolved: + + * wait group now includes all exit related goroutines + * ensure that readLoop exits before exiting cleanup + * always check messagesInFlight at readLoop exit + * close underlying connection last + +### 0.3.5 - 2014-04-05 + +**Upgrading from 0.3.4**: There are no backward incompatible changes. + +This release includes a few new features such as support for channel +sampling and sending along a user agent string (which is now displayed +in `nsqadmin`). + +Also, a critical bug fix for potential deadlocks (thanks @kjk +for reporting and help testing). + +New Features/Improvements: + + * #27 - reader logs disambiguate topic/channel + * #22 - channel sampling + * #23 - user agent + +Bug Fixes: + + * #24 - fix racey reader IDENTIFY buffering + * #29 - fix recursive RLock deadlocks + +### 0.3.4 - 2013-11-19 + +**Upgrading from 0.3.3**: There are no backward incompatible changes. + +This is a bug fix release, notably potential deadlocks in `Message.Requeue()` and `Message.Touch()` +as well as a potential busy loop cleaning up closed connections with in-flight messages. + +New Features/Improvements: + + * #14 - add `Reader.Configure()` + * #18 - return an exported error when an `nsqlookupd` address is already configured + +Bug Fixes: + + * #15 - dont let `handleError()` loop if already connected + * #17 - resolve potential deadlocks on `Message` responders + * #16 - eliminate busy loop when draining `finishedMessages` + +### 0.3.3 - 2013-10-21 + +**Upgrading from 0.3.2**: This release requires NSQ binary version `0.2.23+` for compression +support. + +This release contains significant `Reader` refactoring of the RDY handling code paths. The +motivation is documented in #1 however the commits in #8 identify individual changes. Additionally, +we eliminated deadlocks during connection cleanup in `Writer`. + +As a result, both user-facing APIs should now be considerably more robust and stable. Additionally, +`Reader` should behave better when backing off. + +New Features/Improvements: + + * #9 - ability to ignore publish responses in `Writer` + * #12 - `Requeue()` method on `Message` + * #6 - `Touch()` method on `Message` + * #4 - snappy/deflate feature negotiation + +Bug Fixes: + + * #8 - `Reader` RDY handling refactoring (race conditions, deadlocks, consolidation) + * #13 - fix `Writer` deadlocks + * #10 - stop accessing simplejson internals + * #5 - fix `max-in-flight` race condition + +### 0.3.2 - 2013-08-26 + +**Upgrading from 0.3.1**: This release requires NSQ binary version `0.2.22+` for TLS support. 
+ +New Features/Improvements: + + * #227 - TLS feature negotiation + * #164/#202/#255 - add `Writer` + * #186 - `MaxBackoffDuration` of `0` disables backoff + * #175 - support for `nsqd` config option `--max-rdy-count` + * #169 - auto-reconnect to hard-coded `nsqd` + +Bug Fixes: + + * #254/#256/#257 - new connection RDY starvation + * #250 - `nsqlookupd` polling improvements + * #243 - limit `IsStarved()` to connections w/ in-flight messages + * #169 - use last RDY count for `IsStarved()`; redistribute RDY state + * #204 - fix early termination blocking + * #177 - support `broadcast_address` + * #161 - connection pool goroutine safety + +### 0.3.1 - 2013-02-07 + +**Upgrading from 0.3.0**: This release requires NSQ binary version `0.2.17+` for `TOUCH` support. + + * #119 - add TOUCH command + * #133 - improved handling of errors/magic + * #127 - send IDENTIFY (missed in #90) + * #16 - add backoff to Reader + +### 0.3.0 - 2013-01-07 + +**Upgrading from 0.2.4**: There are no backward incompatible changes to applications +written against the public `nsq.Reader` API. + +However, there *are* a few backward incompatible changes to the API for applications that +directly use other public methods, or properties of a few NSQ data types: + +`nsq.Message` IDs are now a type `nsq.MessageID` (a `[16]byte` array). The signatures of +`nsq.Finish()` and `nsq.Requeue()` reflect this change. + +`nsq.SendCommand()` and `nsq.Frame()` were removed in favor of `nsq.SendFramedResponse()`. + +`nsq.Subscribe()` no longer accepts `shortId` and `longId`. If upgrading your consumers +before upgrading your `nsqd` binaries to `0.2.16-rc.1` they will not be able to send the +optional custom identifiers. + + * #90 performance optimizations + * #81 reader performance improvements / MPUB support + +### 0.2.4 - 2012-10-15 + + * #69 added IsStarved() to reader API + +### 0.2.3 - 2012-10-11 + + * #64 timeouts on reader queries to lookupd + * #54 fix crash issue with reader cleaning up from unexpectedly closed nsqd connections + +### 0.2.2 - 2012-10-09 + + * Initial public release diff --git a/vendor/github.com/nsqio/go-nsq/LICENSE b/vendor/github.com/nsqio/go-nsq/LICENSE new file mode 100644 index 00000000..89de3547 --- /dev/null +++ b/vendor/github.com/nsqio/go-nsq/LICENSE @@ -0,0 +1,17 @@ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
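A minimal consumer sketch against the go-nsq API documented in the README and UPGRADING notes that follow, assuming a locally running `nsqd`; the topic, channel, and address are hypothetical placeholders:

```go
package main

import (
	"log"

	"github.com/nsqio/go-nsq"
)

func main() {
	cfg := nsq.NewConfig()
	// Options can be set via the coercing Set helper or on the struct directly.
	if err := cfg.Set("max_in_flight", 200); err != nil {
		log.Fatal(err)
	}

	// "events" and "archiver" are placeholder topic/channel names.
	c, err := nsq.NewConsumer("events", "archiver", cfg)
	if err != nil {
		log.Fatal(err)
	}

	c.AddHandler(nsq.HandlerFunc(func(m *nsq.Message) error {
		log.Printf("got %d bytes", len(m.Body))
		return nil // nil => FIN, non-nil error => REQ
	}))

	// Assumes an nsqd listening on the default TCP port.
	if err := c.ConnectToNSQD("127.0.0.1:4150"); err != nil {
		log.Fatal(err)
	}
	<-c.StopChan
}
```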
diff --git a/vendor/github.com/nsqio/go-nsq/README.md b/vendor/github.com/nsqio/go-nsq/README.md
new file mode 100644
index 00000000..326ac497
--- /dev/null
+++ b/vendor/github.com/nsqio/go-nsq/README.md
@@ -0,0 +1,19 @@
+## go-nsq
+
+[![Build Status](https://github.com/nsqio/go-nsq/workflows/tests/badge.svg)](https://github.com/nsqio/go-nsq/actions) [![GoDoc](https://godoc.org/github.com/nsqio/go-nsq?status.svg)](https://godoc.org/github.com/nsqio/go-nsq) [![GitHub release](https://img.shields.io/github/release/nsqio/go-nsq.svg)](https://github.com/nsqio/go-nsq/releases/latest)
+
+The official Go package for [NSQ][nsq].
+
+### Docs
+
+See [godoc][nsq_gopkgdoc] and the [main repo apps][apps] directory for examples of clients built
+using this package.
+
+### Tests
+
+Tests are run via `./test.sh` (which requires `nsqd` and `nsqlookupd` to be installed).
+
+[nsq]: https://github.com/nsqio/nsq
+[nsq_gopkgdoc]: http://godoc.org/github.com/nsqio/go-nsq
+[apps]: https://github.com/nsqio/nsq/tree/master/apps
+[travis]: http://travis-ci.org/nsqio/go-nsq
diff --git a/vendor/github.com/nsqio/go-nsq/UPGRADING.md b/vendor/github.com/nsqio/go-nsq/UPGRADING.md
new file mode 100644
index 00000000..ad2e7cd0
--- /dev/null
+++ b/vendor/github.com/nsqio/go-nsq/UPGRADING.md
@@ -0,0 +1,180 @@
+This outlines the backwards incompatible changes that were made to the public API after the
+`v0.3.7` stable release, and how to migrate existing legacy codebases.
+
+#### Background
+
+The original `go-nsq` codebase is some of our earliest Go code, and one of our first attempts at a
+public Go library.
+
+We've learned a lot over the last 2 years and we wanted `go-nsq` to reflect the experiences we've
+had working with the library as well as the general Go conventions and best practices we picked up
+along the way.
+
+The diff can be seen via: https://github.com/nsqio/go-nsq/compare/v0.3.7...HEAD
+
+The bulk of the refactoring came via: https://github.com/nsqio/go-nsq/pull/30
+
+#### Naming
+
+Previously, the high-level types we exposed were named `nsq.Reader` and `nsq.Writer`. These
+reflected internal naming conventions we had used at bitly for some time but conflated semantics
+with what a typical Go developer would expect (they obviously did not implement `io.Reader` and
+`io.Writer`).
+
+We renamed these types to `nsq.Consumer` and `nsq.Producer`, which more effectively communicate
+their purpose and is consistent with the NSQ documentation.
+
+#### Configuration
+
+In the previous API there were inconsistent and confusing ways to configure your clients.
+
+Now, configuration is performed *before* creating an `nsq.Consumer` or `nsq.Producer` by creating
+an `nsq.Config` struct. The only valid way to do this is via `nsq.NewConfig` (i.e. using a struct
+literal will panic due to invalid internal state).
+
+The `nsq.Config` struct has exported variables that can be set directly in a type-safe manner. You
+can also call `cfg.Validate()` to check that the values are correct and within range.
+
+`nsq.Config` also exposes a convenient helper method `Set(k string, v interface{})` that can set
+options by *coercing* the supplied `interface{}` value.
+
+This is incredibly convenient if you're reading options from a config file or in a serialized
+format that does not exactly match the native types.
+
+It is both flexible and forgiving.
+
+#### Improving the nsq.Handler interface
+
+`go-nsq` attempts to make writing the common use case consumer incredibly easy.
+
+You specify a type that implements the `nsq.Handler` interface, the interface method is called per
+message, and the return value of said method indicates to the library what the response to `nsqd`
+should be (`FIN` or `REQ`), all the while managing flow control and backoff.
+
+However, more advanced use cases require the ability to respond to a message *later*
+("asynchronously", if you will). Our original API provided a *second* message handler interface
+called `nsq.AsyncHandler`.
+
+Unfortunately, it was never obvious from the name alone (or even the documentation) how to properly
+use this form. The API was needlessly complex, involving the garbage creation of wrapping structs
+to track state and respond to messages.
+
+We originally had the same problem in `pynsq`, our Python client library, and we were able to
+resolve the tension and expose an API that was robust and supported all use cases.
+
+The new `go-nsq` message handler interface exposes only `nsq.Handler`, and its `HandleMessage`
+method remains identical (specifically, `nsq.AsyncHandler` has been removed).
+
+Additionally, the API to configure handlers has been improved to provide better first-class support
+for common operations. We've added `AddConcurrentHandlers` (for quickly spawning multiple handler
+goroutines).
+
+For the most common use case, where you want `go-nsq` to respond to messages on your behalf, there
+are no changes required! In fact, we've made it even easier to implement the `nsq.Handler`
+interface for simple functions by providing the `nsq.HandlerFunc` type (in the spirit of the Go
+standard library's `http.HandlerFunc`):
+
+```go
+r, err := nsq.NewConsumer("test_topic", "test_channel", nsq.NewConfig())
+if err != nil {
+    log.Fatalf(err.Error())
+}
+
+r.AddHandler(nsq.HandlerFunc(func(m *nsq.Message) error {
+    return doSomeWork(m)
+}))
+
+err = r.ConnectToNSQD(nsqdAddr)
+if err != nil {
+    log.Fatalf(err.Error())
+}
+
+<-r.StopChan
+```
+
+In the new API, we've made the `nsq.Message` struct more robust, giving it the ability to proxy
+responses. If you want to usurp control of the message from `go-nsq`, you simply call
+`msg.DisableAutoResponse()`.
+
+This is effectively the same as if you had used `nsq.AsyncHandler`, only you don't need to manage
+`nsq.FinishedMessage` structs or implement a separate interface. Instead you just keep/pass
+references to the `nsq.Message` itself, and when you're ready to respond you call `msg.Finish()`,
+`msg.Requeue()` or `msg.Touch()`. Additionally, this means you can make this
+decision on a *per-message* basis rather than for the lifetime of the handler.
+
+Here is an example:
+
+```go
+type myHandler struct {}
+
+func (h *myHandler) HandleMessage(m *nsq.Message) error {
+    m.DisableAutoResponse()
+    workerChan <- m
+    return nil
+}
+
+go func() {
+    for m := range workerChan {
+        err := doSomeWork(m)
+        if err != nil {
+            m.Requeue(-1)
+            continue
+        }
+        m.Finish()
+    }
+}()
+
+cfg := nsq.NewConfig()
+cfg.MaxInFlight = 1000
+r, err := nsq.NewConsumer("test_topic", "test_channel", cfg)
+if err != nil {
+    log.Fatalf(err.Error())
+}
+r.AddConcurrentHandlers(&myHandler{}, 20)
+
+err = r.ConnectToNSQD(nsqdAddr)
+if err != nil {
+    log.Fatalf(err.Error())
+}
+
+<-r.StopChan
+```
+
+#### Requeue without backoff
+
+As a side effect of the message handler restructuring above, it is now trivial to respond to a
+message without triggering a backoff state in `nsq.Consumer` (which was not possible in the
+previous API).
+
+The `nsq.Message` type now has a `msg.RequeueWithoutBackoff()` method for this purpose.
+
+#### Producer Error Handling
+
+Previously, `Writer` (now `Producer`) returned a triplicate of `frameType`, `responseBody`, and
+`error` from calls to `*Publish`.
+
+This required the caller to check both `error` and `frameType` to confirm success. `Producer`
+publish methods now return only `error`.
+
+#### Logging
+
+One of the challenges library implementors face is how to provide feedback via logging, while
+exposing an interface that follows the standard library and still provides a means to control and
+configure the output.
+
+In the new API, we've provided a method on `Consumer` and `Producer` called `SetLogger` that takes
+an interface compatible with the Go standard library `log.Logger` (which can be instantiated via
+`log.New`) and a traditional log level integer `nsq.LogLevel{Debug,Info,Warning,Error}`:
+
+    Output(maxdepth int, s string) error
+
+This gives the user the flexibility to control the format, destination, and verbosity while still
+conforming to standard library logging conventions.
+
+#### Misc.
+
+Un-exported `NewDeadlineTransport` and `ApiRequest`, which never should have been exported in the
+first place.
+
+`nsq.Message` serialization switched away from `binary.{Read,Write}` for performance and
+`nsq.Message` now implements the `io.WriterTo` interface.
diff --git a/vendor/github.com/nsqio/go-nsq/api_request.go b/vendor/github.com/nsqio/go-nsq/api_request.go
new file mode 100644
index 00000000..e565e7cf
--- /dev/null
+++ b/vendor/github.com/nsqio/go-nsq/api_request.go
@@ -0,0 +1,78 @@
+package nsq
+
+import (
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net"
+	"net/http"
+	"time"
+)
+
+type deadlinedConn struct {
+	Timeout time.Duration
+	net.Conn
+}
+
+func (c *deadlinedConn) Read(b []byte) (n int, err error) {
+	c.Conn.SetReadDeadline(time.Now().Add(c.Timeout))
+	return c.Conn.Read(b)
+}
+
+func (c *deadlinedConn) Write(b []byte) (n int, err error) {
+	c.Conn.SetWriteDeadline(time.Now().Add(c.Timeout))
+	return c.Conn.Write(b)
+}
+
+type wrappedResp struct {
+	Status     string      `json:"status_txt"`
+	StatusCode int         `json:"status_code"`
+	Data       interface{} `json:"data"`
+}
+
+// stores the result in the value pointed to by ret (must be a pointer)
+func apiRequestNegotiateV1(httpclient *http.Client, method string, endpoint string, headers http.Header, ret interface{}) error {
+	req, err := http.NewRequest(method, endpoint, nil)
+	if err != nil {
+		return err
+	}
+	for k, v := range headers {
+		req.Header[k] = v
+	}
+
+	req.Header.Add("Accept", "application/vnd.nsq; version=1.0")
+
+	resp, err := httpclient.Do(req)
+	if err != nil {
+		return err
+	}
+
+	respBody, err := ioutil.ReadAll(resp.Body)
+	resp.Body.Close()
+	if err != nil {
+		return err
+	}
+
+	if resp.StatusCode != 200 {
+		return fmt.Errorf("got response %s %q", resp.Status, respBody)
+	}
+
+	if len(respBody) == 0 {
+		respBody = []byte("{}")
+	}
+
+	if resp.Header.Get("X-NSQ-Content-Type") == "nsq; version=1.0" {
+		return json.Unmarshal(respBody, ret)
+	}
+
+	wResp := &wrappedResp{
+		Data: ret,
+	}
+
+	if err = json.Unmarshal(respBody, wResp); err != nil {
+		return err
+	}
+
+	// wResp.StatusCode here is equal to resp.StatusCode, so ignore it
+	return nil
+}
diff --git a/vendor/github.com/nsqio/go-nsq/command.go b/vendor/github.com/nsqio/go-nsq/command.go
new file mode 100644
index 00000000..80e47436
--- /dev/null
+++ b/vendor/github.com/nsqio/go-nsq/command.go
@@ -0,0 +1,221 @@
+package nsq
+
+import (
+	"bytes"
+ "encoding/binary" + "encoding/json" + "fmt" + "io" + "strconv" + "time" +) + +var byteSpace = []byte(" ") +var byteNewLine = []byte("\n") + +// Command represents a command from a client to an NSQ daemon +type Command struct { + Name []byte + Params [][]byte + Body []byte +} + +// String returns the name and parameters of the Command +func (c *Command) String() string { + if len(c.Params) > 0 { + return fmt.Sprintf("%s %s", c.Name, string(bytes.Join(c.Params, byteSpace))) + } + return string(c.Name) +} + +// WriteTo implements the WriterTo interface and +// serializes the Command to the supplied Writer. +// +// It is suggested that the target Writer is buffered +// to avoid performing many system calls. +func (c *Command) WriteTo(w io.Writer) (int64, error) { + var total int64 + var buf [4]byte + + n, err := w.Write(c.Name) + total += int64(n) + if err != nil { + return total, err + } + + for _, param := range c.Params { + n, err := w.Write(byteSpace) + total += int64(n) + if err != nil { + return total, err + } + n, err = w.Write(param) + total += int64(n) + if err != nil { + return total, err + } + } + + n, err = w.Write(byteNewLine) + total += int64(n) + if err != nil { + return total, err + } + + if c.Body != nil { + bufs := buf[:] + binary.BigEndian.PutUint32(bufs, uint32(len(c.Body))) + n, err := w.Write(bufs) + total += int64(n) + if err != nil { + return total, err + } + n, err = w.Write(c.Body) + total += int64(n) + if err != nil { + return total, err + } + } + + return total, nil +} + +// Identify creates a new Command to provide information about the client. After connecting, +// it is generally the first message sent. +// +// The supplied map is marshaled into JSON to provide some flexibility +// for this command to evolve over time. +// +// See http://nsq.io/clients/tcp_protocol_spec.html#identify for information +// on the supported options +func Identify(js map[string]interface{}) (*Command, error) { + body, err := json.Marshal(js) + if err != nil { + return nil, err + } + return &Command{[]byte("IDENTIFY"), nil, body}, nil +} + +// Auth sends credentials for authentication +// +// After `Identify`, this is usually the first message sent, if auth is used. 
+func Auth(secret string) (*Command, error) { + return &Command{[]byte("AUTH"), nil, []byte(secret)}, nil +} + +// Register creates a new Command to add a topic/channel for the connected nsqd +func Register(topic string, channel string) *Command { + params := [][]byte{[]byte(topic)} + if len(channel) > 0 { + params = append(params, []byte(channel)) + } + return &Command{[]byte("REGISTER"), params, nil} +} + +// UnRegister creates a new Command to remove a topic/channel for the connected nsqd +func UnRegister(topic string, channel string) *Command { + params := [][]byte{[]byte(topic)} + if len(channel) > 0 { + params = append(params, []byte(channel)) + } + return &Command{[]byte("UNREGISTER"), params, nil} +} + +// Ping creates a new Command to keep-alive the state of all the +// announced topic/channels for a given client +func Ping() *Command { + return &Command{[]byte("PING"), nil, nil} +} + +// Publish creates a new Command to write a message to a given topic +func Publish(topic string, body []byte) *Command { + var params = [][]byte{[]byte(topic)} + return &Command{[]byte("PUB"), params, body} +} + +// DeferredPublish creates a new Command to write a message to a given topic +// where the message will queue at the channel level until the timeout expires +func DeferredPublish(topic string, delay time.Duration, body []byte) *Command { + var params = [][]byte{[]byte(topic), []byte(strconv.Itoa(int(delay / time.Millisecond)))} + return &Command{[]byte("DPUB"), params, body} +} + +// MultiPublish creates a new Command to write more than one message to a given topic +// (useful for high-throughput situations to avoid roundtrips and saturate the pipe) +func MultiPublish(topic string, bodies [][]byte) (*Command, error) { + var params = [][]byte{[]byte(topic)} + + num := uint32(len(bodies)) + bodySize := 4 + for _, b := range bodies { + bodySize += len(b) + 4 + } + body := make([]byte, 0, bodySize) + buf := bytes.NewBuffer(body) + + err := binary.Write(buf, binary.BigEndian, &num) + if err != nil { + return nil, err + } + for _, b := range bodies { + err = binary.Write(buf, binary.BigEndian, int32(len(b))) + if err != nil { + return nil, err + } + _, err = buf.Write(b) + if err != nil { + return nil, err + } + } + + return &Command{[]byte("MPUB"), params, buf.Bytes()}, nil +} + +// Subscribe creates a new Command to subscribe to the given topic/channel +func Subscribe(topic string, channel string) *Command { + var params = [][]byte{[]byte(topic), []byte(channel)} + return &Command{[]byte("SUB"), params, nil} +} + +// Ready creates a new Command to specify +// the number of messages a client is willing to receive +func Ready(count int) *Command { + var params = [][]byte{[]byte(strconv.Itoa(count))} + return &Command{[]byte("RDY"), params, nil} +} + +// Finish creates a new Command to indiciate that +// a given message (by id) has been processed successfully +func Finish(id MessageID) *Command { + var params = [][]byte{id[:]} + return &Command{[]byte("FIN"), params, nil} +} + +// Requeue creates a new Command to indicate that +// a given message (by id) should be requeued after the given delay +// NOTE: a delay of 0 indicates immediate requeue +func Requeue(id MessageID, delay time.Duration) *Command { + var params = [][]byte{id[:], []byte(strconv.Itoa(int(delay / time.Millisecond)))} + return &Command{[]byte("REQ"), params, nil} +} + +// Touch creates a new Command to reset the timeout for +// a given message (by id) +func Touch(id MessageID) *Command { + var params = [][]byte{id[:]} + return 
&Command{[]byte("TOUCH"), params, nil} +} + +// StartClose creates a new Command to indicate that the +// client would like to start a close cycle. nsqd will no longer +// send messages to a client in this state and the client is expected +// finish pending messages and close the connection +func StartClose() *Command { + return &Command{[]byte("CLS"), nil, nil} +} + +// Nop creates a new Command that has no effect server side. +// Commonly used to respond to heartbeats +func Nop() *Command { + return &Command{[]byte("NOP"), nil, nil} +} diff --git a/vendor/github.com/nsqio/go-nsq/config.go b/vendor/github.com/nsqio/go-nsq/config.go new file mode 100644 index 00000000..1f7ea2cd --- /dev/null +++ b/vendor/github.com/nsqio/go-nsq/config.go @@ -0,0 +1,674 @@ +package nsq + +import ( + "crypto/tls" + "crypto/x509" + "errors" + "fmt" + "io/ioutil" + "log" + "math" + "math/rand" + "net" + "os" + "reflect" + "strconv" + "strings" + "sync" + "time" + "unsafe" +) + +// Define handlers for setting config defaults, and setting config values from command line arguments or config files +type configHandler interface { + HandlesOption(c *Config, option string) bool + Set(c *Config, option string, value interface{}) error + Validate(c *Config) error +} + +type defaultsHandler interface { + SetDefaults(c *Config) error +} + +// BackoffStrategy defines a strategy for calculating the duration of time +// a consumer should backoff for a given attempt +type BackoffStrategy interface { + Calculate(attempt int) time.Duration +} + +// ExponentialStrategy implements an exponential backoff strategy (default) +type ExponentialStrategy struct { + cfg *Config +} + +// Calculate returns a duration of time: 2 ^ attempt +func (s *ExponentialStrategy) Calculate(attempt int) time.Duration { + backoffDuration := s.cfg.BackoffMultiplier * + time.Duration(math.Pow(2, float64(attempt))) + return backoffDuration +} + +func (s *ExponentialStrategy) setConfig(cfg *Config) { + s.cfg = cfg +} + +// FullJitterStrategy implements http://www.awsarchitectureblog.com/2015/03/backoff.html +type FullJitterStrategy struct { + cfg *Config + + rngOnce sync.Once + rng *rand.Rand +} + +// Calculate returns a random duration of time [0, 2 ^ attempt] +func (s *FullJitterStrategy) Calculate(attempt int) time.Duration { + // lazily initialize the RNG + s.rngOnce.Do(func() { + if s.rng != nil { + return + } + s.rng = rand.New(rand.NewSource(time.Now().UnixNano())) + }) + + backoffDuration := s.cfg.BackoffMultiplier * + time.Duration(math.Pow(2, float64(attempt))) + return time.Duration(s.rng.Int63n(int64(backoffDuration))) +} + +func (s *FullJitterStrategy) setConfig(cfg *Config) { + s.cfg = cfg +} + +// Config is a struct of NSQ options +// +// The only valid way to create a Config is via NewConfig, using a struct literal will panic. +// After Config is passed into a high-level type (like Consumer, Producer, etc.) the values are no +// longer mutable (they are copied). +// +// Use Set(option string, value interface{}) as an alternate way to set parameters +type Config struct { + initialized bool + + // used to Initialize, Validate + configHandlers []configHandler + + DialTimeout time.Duration `opt:"dial_timeout" default:"1s"` + + // Deadlines for network reads and writes + ReadTimeout time.Duration `opt:"read_timeout" min:"100ms" max:"5m" default:"60s"` + WriteTimeout time.Duration `opt:"write_timeout" min:"100ms" max:"5m" default:"1s"` + + // LocalAddr is the local address to use when dialing an nsqd. 
+ // If empty, a local address is automatically chosen. + LocalAddr net.Addr `opt:"local_addr"` + + // Duration between polling lookupd for new producers, and fractional jitter to add to + // the lookupd pool loop. this helps evenly distribute requests even if multiple consumers + // restart at the same time + // + // NOTE: when not using nsqlookupd, LookupdPollInterval represents the duration of time between + // reconnection attempts + LookupdPollInterval time.Duration `opt:"lookupd_poll_interval" min:"10ms" max:"5m" default:"60s"` + LookupdPollJitter float64 `opt:"lookupd_poll_jitter" min:"0" max:"1" default:"0.3"` + LookupdPollTimeout time.Duration `opt:"lookupd_poll_timeout" default:"1m"` + + // Maximum duration when REQueueing (for doubling of deferred requeue) + MaxRequeueDelay time.Duration `opt:"max_requeue_delay" min:"0" max:"60m" default:"15m"` + DefaultRequeueDelay time.Duration `opt:"default_requeue_delay" min:"0" max:"60m" default:"90s"` + + // Backoff strategy, defaults to exponential backoff. Overwrite this to define alternative backoff algrithms. + BackoffStrategy BackoffStrategy `opt:"backoff_strategy" default:"exponential"` + // Maximum amount of time to backoff when processing fails 0 == no backoff + MaxBackoffDuration time.Duration `opt:"max_backoff_duration" min:"0" max:"60m" default:"2m"` + // Unit of time for calculating consumer backoff + BackoffMultiplier time.Duration `opt:"backoff_multiplier" min:"0" max:"60m" default:"1s"` + + // Maximum number of times this consumer will attempt to process a message before giving up + MaxAttempts uint16 `opt:"max_attempts" min:"0" max:"65535" default:"5"` + + // Duration to wait for a message from an nsqd when in a state where RDY + // counts are re-distributed (e.g. max_in_flight < num_producers) + LowRdyIdleTimeout time.Duration `opt:"low_rdy_idle_timeout" min:"1s" max:"5m" default:"10s"` + // Duration to wait until redistributing RDY for an nsqd regardless of LowRdyIdleTimeout + LowRdyTimeout time.Duration `opt:"low_rdy_timeout" min:"1s" max:"5m" default:"30s"` + // Duration between redistributing max-in-flight to connections + RDYRedistributeInterval time.Duration `opt:"rdy_redistribute_interval" min:"1ms" max:"5s" default:"5s"` + + // Identifiers sent to nsqd representing this client + // UserAgent is in the spirit of HTTP (default: "/") + ClientID string `opt:"client_id"` // (defaults: short hostname) + Hostname string `opt:"hostname"` + UserAgent string `opt:"user_agent"` + + // Duration of time between heartbeats. 
This must be less than ReadTimeout + HeartbeatInterval time.Duration `opt:"heartbeat_interval" default:"30s"` + // Integer percentage to sample the channel (requires nsqd 0.2.25+) + SampleRate int32 `opt:"sample_rate" min:"0" max:"99"` + + // To set TLS config, use the following options: + // + // tls_v1 - Bool enable TLS negotiation + // tls_root_ca_file - String path to file containing root CA + // tls_insecure_skip_verify - Bool indicates whether this client should verify server certificates + // tls_cert - String path to file containing public key for certificate + // tls_key - String path to file containing private key for certificate + // tls_min_version - String indicating the minimum version of tls acceptable ('ssl3.0', 'tls1.0', 'tls1.1', 'tls1.2') + // + TlsV1 bool `opt:"tls_v1"` + TlsConfig *tls.Config `opt:"tls_config"` + + // Compression Settings + Deflate bool `opt:"deflate"` + DeflateLevel int `opt:"deflate_level" min:"1" max:"9" default:"6"` + Snappy bool `opt:"snappy"` + + // Size of the buffer (in bytes) used by nsqd for buffering writes to this connection + OutputBufferSize int64 `opt:"output_buffer_size" default:"16384"` + // Timeout used by nsqd before flushing buffered writes (set to 0 to disable). + // + // WARNING: configuring clients with an extremely low + // (< 25ms) output_buffer_timeout has a significant effect + // on nsqd CPU usage (particularly with > 50 clients connected). + OutputBufferTimeout time.Duration `opt:"output_buffer_timeout" default:"250ms"` + + // Maximum number of messages to allow in flight (concurrency knob) + MaxInFlight int `opt:"max_in_flight" min:"0" default:"1"` + + // The server-side message timeout for messages delivered to this client + MsgTimeout time.Duration `opt:"msg_timeout" min:"0"` + + // Secret for nsqd authentication (requires nsqd 0.2.29+) + AuthSecret string `opt:"auth_secret"` + // Use AuthSecret as 'Authorization: Bearer {AuthSecret}' on lookupd queries + LookupdAuthorization bool `opt:"skip_lookupd_authorization" default:"true"` +} + +// NewConfig returns a new default nsq configuration. +// +// This must be used to initialize Config structs. Values can be set directly, or through Config.Set() +func NewConfig() *Config { + c := &Config{ + configHandlers: []configHandler{&structTagsConfig{}, &tlsConfig{}}, + initialized: true, + } + if err := c.setDefaults(); err != nil { + panic(err.Error()) + } + return c +} + +// Set takes an option as a string and a value as an interface and +// attempts to set the appropriate configuration option. +// +// It attempts to coerce the value into the right format depending on the named +// option and the underlying type of the value passed in. +// +// Calls to Set() that take a time.Duration as an argument can be input as: +// +// "1000ms" (a string parsed by time.ParseDuration()) +// 1000 (an integer interpreted as milliseconds) +// 1000*time.Millisecond (a literal time.Duration value) +// +// Calls to Set() that take bool can be input as: +// +// "true" (a string parsed by strconv.ParseBool()) +// true (a boolean) +// 1 (an int where 1 == true and 0 == false) +// +// It returns an error for an invalid option or value. 
+func (c *Config) Set(option string, value interface{}) error { + c.assertInitialized() + option = strings.Replace(option, "-", "_", -1) + for _, h := range c.configHandlers { + if h.HandlesOption(c, option) { + return h.Set(c, option, value) + } + } + return fmt.Errorf("invalid option %s", option) +} + +func (c *Config) assertInitialized() { + if !c.initialized { + panic("Config{} must be created with NewConfig()") + } +} + +// Validate checks that all values are within specified min/max ranges +func (c *Config) Validate() error { + c.assertInitialized() + for _, h := range c.configHandlers { + if err := h.Validate(c); err != nil { + return err + } + } + return nil +} + +func (c *Config) setDefaults() error { + for _, h := range c.configHandlers { + hh, ok := h.(defaultsHandler) + if ok { + if err := hh.SetDefaults(c); err != nil { + return err + } + } + } + return nil +} + +type structTagsConfig struct{} + +// Handle options that are listed in StructTags +func (h *structTagsConfig) HandlesOption(c *Config, option string) bool { + val := reflect.ValueOf(c).Elem() + typ := val.Type() + for i := 0; i < typ.NumField(); i++ { + field := typ.Field(i) + opt := field.Tag.Get("opt") + if opt == option { + return true + } + } + return false +} + +// Set values based on parameters in StructTags +func (h *structTagsConfig) Set(c *Config, option string, value interface{}) error { + val := reflect.ValueOf(c).Elem() + typ := val.Type() + for i := 0; i < typ.NumField(); i++ { + field := typ.Field(i) + opt := field.Tag.Get("opt") + + if option != opt { + continue + } + + min := field.Tag.Get("min") + max := field.Tag.Get("max") + + fieldVal := val.FieldByName(field.Name) + dest := unsafeValueOf(fieldVal) + coercedVal, err := coerce(value, field.Type) + if err != nil { + return fmt.Errorf("failed to coerce option %s (%v) - %s", + option, value, err) + } + if min != "" { + coercedMinVal, _ := coerce(min, field.Type) + if valueCompare(coercedVal, coercedMinVal) == -1 { + return fmt.Errorf("invalid %s ! %v < %v", + option, coercedVal.Interface(), coercedMinVal.Interface()) + } + } + if max != "" { + coercedMaxVal, _ := coerce(max, field.Type) + if valueCompare(coercedVal, coercedMaxVal) == 1 { + return fmt.Errorf("invalid %s ! 
%v > %v", + option, coercedVal.Interface(), coercedMaxVal.Interface()) + } + } + if coercedVal.Type().String() == "nsq.BackoffStrategy" { + v := coercedVal.Interface().(BackoffStrategy) + if v, ok := v.(interface { + setConfig(*Config) + }); ok { + v.setConfig(c) + } + } + dest.Set(coercedVal) + return nil + } + return fmt.Errorf("unknown option %s", option) +} + +func (h *structTagsConfig) SetDefaults(c *Config) error { + val := reflect.ValueOf(c).Elem() + typ := val.Type() + for i := 0; i < typ.NumField(); i++ { + field := typ.Field(i) + opt := field.Tag.Get("opt") + defaultVal := field.Tag.Get("default") + if defaultVal == "" || opt == "" { + continue + } + + if err := c.Set(opt, defaultVal); err != nil { + return err + } + } + + hostname, err := os.Hostname() + if err != nil { + log.Fatalf("ERROR: unable to get hostname %s", err.Error()) + } + + c.ClientID = strings.Split(hostname, ".")[0] + c.Hostname = hostname + c.UserAgent = fmt.Sprintf("go-nsq/%s", VERSION) + return nil +} + +func (h *structTagsConfig) Validate(c *Config) error { + val := reflect.ValueOf(c).Elem() + typ := val.Type() + for i := 0; i < typ.NumField(); i++ { + field := typ.Field(i) + + min := field.Tag.Get("min") + max := field.Tag.Get("max") + + if min == "" && max == "" { + continue + } + + value := val.FieldByName(field.Name) + + if min != "" { + coercedMinVal, _ := coerce(min, field.Type) + if valueCompare(value, coercedMinVal) == -1 { + return fmt.Errorf("invalid %s ! %v < %v", + field.Name, value.Interface(), coercedMinVal.Interface()) + } + } + if max != "" { + coercedMaxVal, _ := coerce(max, field.Type) + if valueCompare(value, coercedMaxVal) == 1 { + return fmt.Errorf("invalid %s ! %v > %v", + field.Name, value.Interface(), coercedMaxVal.Interface()) + } + } + } + + if c.HeartbeatInterval > c.ReadTimeout { + return fmt.Errorf("HeartbeatInterval %v must be less than ReadTimeout %v", c.HeartbeatInterval, c.ReadTimeout) + } + + return nil +} + +// Parsing for higher order TLS settings +type tlsConfig struct { + certFile string + keyFile string +} + +func (t *tlsConfig) HandlesOption(c *Config, option string) bool { + switch option { + case "tls_root_ca_file", "tls_insecure_skip_verify", "tls_cert", "tls_key", "tls_min_version": + return true + } + return false +} + +func (t *tlsConfig) Set(c *Config, option string, value interface{}) error { + if c.TlsConfig == nil { + c.TlsConfig = &tls.Config{ + MinVersion: tls.VersionTLS10, + MaxVersion: tls.VersionTLS12, // enable TLS_FALLBACK_SCSV prior to Go 1.5: https://go-review.googlesource.com/#/c/1776/ + } + } + val := reflect.ValueOf(c.TlsConfig).Elem() + + switch option { + case "tls_cert", "tls_key": + if option == "tls_cert" { + t.certFile = value.(string) + } else { + t.keyFile = value.(string) + } + if t.certFile != "" && t.keyFile != "" && len(c.TlsConfig.Certificates) == 0 { + cert, err := tls.LoadX509KeyPair(t.certFile, t.keyFile) + if err != nil { + return err + } + c.TlsConfig.Certificates = []tls.Certificate{cert} + } + return nil + case "tls_root_ca_file": + filename, ok := value.(string) + if !ok { + return fmt.Errorf("ERROR: %v is not a string", value) + } + tlsCertPool := x509.NewCertPool() + caCertFile, err := ioutil.ReadFile(filename) + if err != nil { + return fmt.Errorf("ERROR: failed to read custom Certificate Authority file %s", err) + } + if !tlsCertPool.AppendCertsFromPEM(caCertFile) { + return fmt.Errorf("ERROR: failed to append certificates from Certificate Authority file") + } + c.TlsConfig.RootCAs = tlsCertPool + return nil + case 
"tls_insecure_skip_verify": + fieldVal := val.FieldByName("InsecureSkipVerify") + dest := unsafeValueOf(fieldVal) + coercedVal, err := coerce(value, fieldVal.Type()) + if err != nil { + return fmt.Errorf("failed to coerce option %s (%v) - %s", + option, value, err) + } + dest.Set(coercedVal) + return nil + case "tls_min_version": + version, ok := value.(string) + if !ok { + return fmt.Errorf("ERROR: %v is not a string", value) + } + switch version { + case "ssl3.0": + c.TlsConfig.MinVersion = tls.VersionSSL30 + case "tls1.0": + c.TlsConfig.MinVersion = tls.VersionTLS10 + case "tls1.1": + c.TlsConfig.MinVersion = tls.VersionTLS11 + case "tls1.2": + c.TlsConfig.MinVersion = tls.VersionTLS12 + default: + return fmt.Errorf("ERROR: %v is not a tls version", value) + } + return nil + } + + return fmt.Errorf("unknown option %s", option) +} + +func (t *tlsConfig) Validate(c *Config) error { + return nil +} + +// because Config contains private structs we can't use reflect.Value +// directly, instead we need to "unsafely" address the variable +func unsafeValueOf(val reflect.Value) reflect.Value { + uptr := unsafe.Pointer(val.UnsafeAddr()) + return reflect.NewAt(val.Type(), uptr).Elem() +} + +func valueCompare(v1 reflect.Value, v2 reflect.Value) int { + switch v1.Type().String() { + case "int", "int16", "int32", "int64": + if v1.Int() > v2.Int() { + return 1 + } else if v1.Int() < v2.Int() { + return -1 + } + return 0 + case "uint", "uint16", "uint32", "uint64": + if v1.Uint() > v2.Uint() { + return 1 + } else if v1.Uint() < v2.Uint() { + return -1 + } + return 0 + case "float32", "float64": + if v1.Float() > v2.Float() { + return 1 + } else if v1.Float() < v2.Float() { + return -1 + } + return 0 + case "time.Duration": + if v1.Interface().(time.Duration) > v2.Interface().(time.Duration) { + return 1 + } else if v1.Interface().(time.Duration) < v2.Interface().(time.Duration) { + return -1 + } + return 0 + } + panic("impossible") +} + +func coerce(v interface{}, typ reflect.Type) (reflect.Value, error) { + var err error + if typ.Kind() == reflect.Ptr { + return reflect.ValueOf(v), nil + } + switch typ.String() { + case "string": + v, err = coerceString(v) + case "int", "int16", "int32", "int64": + v, err = coerceInt64(v) + case "uint", "uint16", "uint32", "uint64": + v, err = coerceUint64(v) + case "float32", "float64": + v, err = coerceFloat64(v) + case "bool": + v, err = coerceBool(v) + case "time.Duration": + v, err = coerceDuration(v) + case "net.Addr": + v, err = coerceAddr(v) + case "nsq.BackoffStrategy": + v, err = coerceBackoffStrategy(v) + default: + v = nil + err = fmt.Errorf("invalid type %s", typ.String()) + } + return valueTypeCoerce(v, typ), err +} + +func valueTypeCoerce(v interface{}, typ reflect.Type) reflect.Value { + val := reflect.ValueOf(v) + if reflect.TypeOf(v) == typ { + return val + } + tval := reflect.New(typ).Elem() + switch typ.String() { + case "int", "int16", "int32", "int64": + tval.SetInt(val.Int()) + case "uint", "uint16", "uint32", "uint64": + tval.SetUint(val.Uint()) + case "float32", "float64": + tval.SetFloat(val.Float()) + default: + tval.Set(val) + } + return tval +} + +func coerceString(v interface{}) (string, error) { + switch v := v.(type) { + case string: + return v, nil + case int, int16, int32, int64, uint, uint16, uint32, uint64: + return fmt.Sprintf("%d", v), nil + case float32, float64: + return fmt.Sprintf("%f", v), nil + } + return fmt.Sprintf("%s", v), nil +} + +func coerceDuration(v interface{}) (time.Duration, error) { + switch v := v.(type) { + 
+	case string:
+		return time.ParseDuration(v)
+	case int, int16, int32, int64:
+		// treat like ms
+		return time.Duration(reflect.ValueOf(v).Int()) * time.Millisecond, nil
+	case uint, uint16, uint32, uint64:
+		// treat like ms
+		return time.Duration(reflect.ValueOf(v).Uint()) * time.Millisecond, nil
+	case time.Duration:
+		return v, nil
+	}
+	return 0, errors.New("invalid value type")
+}
+
+func coerceAddr(v interface{}) (net.Addr, error) {
+	switch v := v.(type) {
+	case string:
+		return net.ResolveTCPAddr("tcp", v)
+	case net.Addr:
+		return v, nil
+	}
+	return nil, errors.New("invalid value type")
+}
+
+func coerceBackoffStrategy(v interface{}) (BackoffStrategy, error) {
+	switch v := v.(type) {
+	case string:
+		switch v {
+		case "", "exponential":
+			return &ExponentialStrategy{}, nil
+		case "full_jitter":
+			return &FullJitterStrategy{}, nil
+		}
+	case BackoffStrategy:
+		return v, nil
+	}
+	return nil, errors.New("invalid value type")
+}
+
+func coerceBool(v interface{}) (bool, error) {
+	switch v := v.(type) {
+	case bool:
+		return v, nil
+	case string:
+		return strconv.ParseBool(v)
+	case int, int16, int32, int64:
+		return reflect.ValueOf(v).Int() != 0, nil
+	case uint, uint16, uint32, uint64:
+		return reflect.ValueOf(v).Uint() != 0, nil
+	}
+	return false, errors.New("invalid value type")
+}
+
+func coerceFloat64(v interface{}) (float64, error) {
+	switch v := v.(type) {
+	case string:
+		return strconv.ParseFloat(v, 64)
+	case int, int16, int32, int64:
+		return float64(reflect.ValueOf(v).Int()), nil
+	case uint, uint16, uint32, uint64:
+		return float64(reflect.ValueOf(v).Uint()), nil
+	case float32:
+		return float64(v), nil
+	case float64:
+		return v, nil
+	}
+	return 0, errors.New("invalid value type")
+}
+
+func coerceInt64(v interface{}) (int64, error) {
+	switch v := v.(type) {
+	case string:
+		return strconv.ParseInt(v, 10, 64)
+	case int, int16, int32, int64:
+		return reflect.ValueOf(v).Int(), nil
+	case uint, uint16, uint32, uint64:
+		return int64(reflect.ValueOf(v).Uint()), nil
+	}
+	return 0, errors.New("invalid value type")
+}
+
+func coerceUint64(v interface{}) (uint64, error) {
+	switch v := v.(type) {
+	case string:
+		return strconv.ParseUint(v, 10, 64)
+	case int, int16, int32, int64:
+		return uint64(reflect.ValueOf(v).Int()), nil
+	case uint, uint16, uint32, uint64:
+		return reflect.ValueOf(v).Uint(), nil
+	}
+	return 0, errors.New("invalid value type")
+}
diff --git a/vendor/github.com/nsqio/go-nsq/config_flag.go b/vendor/github.com/nsqio/go-nsq/config_flag.go
new file mode 100644
index 00000000..3e71cabf
--- /dev/null
+++ b/vendor/github.com/nsqio/go-nsq/config_flag.go
@@ -0,0 +1,31 @@
+package nsq
+
+import (
+	"strings"
+)
+
+// ConfigFlag wraps a Config and implements the flag.Value interface
+type ConfigFlag struct {
+	Config *Config
+}
+
+// Set takes a comma-separated value and follows the rules in Config.Set,
+// using the first field as the option key, and the second (if present) as the value
+func (c *ConfigFlag) Set(opt string) (err error) {
+	parts := strings.SplitN(opt, ",", 2)
+	key := parts[0]
+
+	switch len(parts) {
+	case 1:
+		// options specified without a value default to boolean true
+		err = c.Config.Set(key, true)
+	case 2:
+		err = c.Config.Set(key, parts[1])
+	}
+	return
+}
+
+// String implements the flag.Value interface
+func (c *ConfigFlag) String() string {
+	return ""
+}
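
A minimal sketch of wiring ConfigFlag into the standard flag package (the
flag name is hypothetical; each value is a "key" or "key,value" pair that
follows Config.Set, and the option names are assumed from Config's tags):

	cfg := nsq.NewConfig()
	flag.Var(&nsq.ConfigFlag{Config: cfg}, "consumer-opt",
		"option to pass through to nsq.Config (may be given multiple times)")
	flag.Parse()
	// e.g. -consumer-opt=heartbeat_interval,10s -consumer-opt=tls_v1,true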
diff --git a/vendor/github.com/nsqio/go-nsq/conn.go b/vendor/github.com/nsqio/go-nsq/conn.go
new file mode 100644
index 00000000..6fa1ce89
--- /dev/null
+++ b/vendor/github.com/nsqio/go-nsq/conn.go
@@ -0,0 +1,765 @@
+package nsq
+
+import (
+	"bufio"
+	"bytes"
+	"compress/flate"
+	"crypto/tls"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"net"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/golang/snappy"
+)
+
+// IdentifyResponse represents the metadata
+// returned from an IDENTIFY command to nsqd
+type IdentifyResponse struct {
+	MaxRdyCount  int64 `json:"max_rdy_count"`
+	TLSv1        bool  `json:"tls_v1"`
+	Deflate      bool  `json:"deflate"`
+	Snappy       bool  `json:"snappy"`
+	AuthRequired bool  `json:"auth_required"`
+}
+
+// AuthResponse represents the metadata
+// returned from an AUTH command to nsqd
+type AuthResponse struct {
+	Identity        string `json:"identity"`
+	IdentityUrl     string `json:"identity_url"`
+	PermissionCount int64  `json:"permission_count"`
+}
+
+type msgResponse struct {
+	msg     *Message
+	cmd     *Command
+	success bool
+	backoff bool
+}
+
+// Conn represents a connection to nsqd
+//
+// Conn exposes a set of callbacks for the
+// various events that occur on a connection
+type Conn struct {
+	// 64bit atomic vars need to be first for proper alignment on 32bit platforms
+	messagesInFlight int64
+	maxRdyCount      int64
+	rdyCount         int64
+	lastRdyTimestamp int64
+	lastMsgTimestamp int64
+
+	mtx sync.Mutex
+
+	config *Config
+
+	conn    *net.TCPConn
+	tlsConn *tls.Conn
+	addr    string
+
+	delegate ConnDelegate
+
+	logger   []logger
+	logLvl   LogLevel
+	logFmt   []string
+	logGuard sync.RWMutex
+
+	r io.Reader
+	w io.Writer
+
+	cmdChan         chan *Command
+	msgResponseChan chan *msgResponse
+	exitChan        chan int
+	drainReady      chan int
+
+	closeFlag int32
+	stopper   sync.Once
+	wg        sync.WaitGroup
+
+	readLoopRunning int32
+}
+
+// NewConn returns a new Conn instance
+func NewConn(addr string, config *Config, delegate ConnDelegate) *Conn {
+	if !config.initialized {
+		panic("Config must be created with NewConfig()")
+	}
+	return &Conn{
+		addr: addr,
+
+		config:   config,
+		delegate: delegate,
+
+		maxRdyCount:      2500,
+		lastMsgTimestamp: time.Now().UnixNano(),
+
+		cmdChan:         make(chan *Command),
+		msgResponseChan: make(chan *msgResponse),
+		exitChan:        make(chan int),
+		drainReady:      make(chan int),
+
+		logger: make([]logger, LogLevelMax+1),
+		logFmt: make([]string, LogLevelMax+1),
+	}
+}
+
+// SetLogger assigns the logger to use as well as a level.
+//
+// The format parameter is expected to be a printf compatible string with
+// a single %s argument. This is useful if you want to provide additional
+// context to the log messages that the connection will print; the default
+// is '(%s)'.
+//
+// The logger parameter is an interface that requires the following
+// method to be implemented (such as the stdlib log.Logger):
+//
+//	Output(calldepth int, s string) error
+//
+func (c *Conn) SetLogger(l logger, lvl LogLevel, format string) {
+	c.logGuard.Lock()
+	defer c.logGuard.Unlock()
+
+	if format == "" {
+		format = "(%s)"
+	}
+	for level := range c.logger {
+		c.logger[level] = l
+		c.logFmt[level] = format
+	}
+	c.logLvl = lvl
+}
+
+func (c *Conn) SetLoggerForLevel(l logger, lvl LogLevel, format string) {
+	c.logGuard.Lock()
+	defer c.logGuard.Unlock()
+
+	if format == "" {
+		format = "(%s)"
+	}
+	c.logger[lvl] = l
+	c.logFmt[lvl] = format
+}
+
+// SetLoggerLevel sets the package logging level.
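+//
+// An illustrative sketch of wiring these together (the logger construction
+// is arbitrary; any type with an Output(calldepth int, s string) error
+// method works):
+//
+//	l := log.New(os.Stderr, "", log.LstdFlags)
+//	conn.SetLogger(l, nsq.LogLevelInfo, "(%s)")
+//	conn.SetLoggerLevel(nsq.LogLevelDebug) // later, raise verbosity for this conn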
+func (c *Conn) SetLoggerLevel(lvl LogLevel) { + c.logGuard.Lock() + defer c.logGuard.Unlock() + + c.logLvl = lvl +} + +func (c *Conn) getLogger(lvl LogLevel) (logger, LogLevel, string) { + c.logGuard.RLock() + defer c.logGuard.RUnlock() + + return c.logger[lvl], c.logLvl, c.logFmt[lvl] +} + +func (c *Conn) getLogLevel() LogLevel { + c.logGuard.RLock() + defer c.logGuard.RUnlock() + + return c.logLvl +} + +// Connect dials and bootstraps the nsqd connection +// (including IDENTIFY) and returns the IdentifyResponse +func (c *Conn) Connect() (*IdentifyResponse, error) { + dialer := &net.Dialer{ + LocalAddr: c.config.LocalAddr, + Timeout: c.config.DialTimeout, + } + + conn, err := dialer.Dial("tcp", c.addr) + if err != nil { + return nil, err + } + c.conn = conn.(*net.TCPConn) + c.r = conn + c.w = conn + + _, err = c.Write(MagicV2) + if err != nil { + c.Close() + return nil, fmt.Errorf("[%s] failed to write magic - %s", c.addr, err) + } + + resp, err := c.identify() + if err != nil { + return nil, err + } + + if resp != nil && resp.AuthRequired { + if c.config.AuthSecret == "" { + c.log(LogLevelError, "Auth Required") + return nil, errors.New("Auth Required") + } + err := c.auth(c.config.AuthSecret) + if err != nil { + c.log(LogLevelError, "Auth Failed %s", err) + return nil, err + } + } + + c.wg.Add(2) + atomic.StoreInt32(&c.readLoopRunning, 1) + go c.readLoop() + go c.writeLoop() + return resp, nil +} + +// Close idempotently initiates connection close +func (c *Conn) Close() error { + atomic.StoreInt32(&c.closeFlag, 1) + if c.conn != nil && atomic.LoadInt64(&c.messagesInFlight) == 0 { + return c.conn.CloseRead() + } + return nil +} + +// IsClosing indicates whether or not the +// connection is currently in the processing of +// gracefully closing +func (c *Conn) IsClosing() bool { + return atomic.LoadInt32(&c.closeFlag) == 1 +} + +// RDY returns the current RDY count +func (c *Conn) RDY() int64 { + return atomic.LoadInt64(&c.rdyCount) +} + +// LastRDY returns the previously set RDY count +func (c *Conn) LastRDY() int64 { + return atomic.LoadInt64(&c.rdyCount) +} + +// SetRDY stores the specified RDY count +func (c *Conn) SetRDY(rdy int64) { + atomic.StoreInt64(&c.rdyCount, rdy) + if rdy > 0 { + atomic.StoreInt64(&c.lastRdyTimestamp, time.Now().UnixNano()) + } +} + +// MaxRDY returns the nsqd negotiated maximum +// RDY count that it will accept for this connection +func (c *Conn) MaxRDY() int64 { + return c.maxRdyCount +} + +// LastRdyTime returns the time of the last non-zero RDY +// update for this connection +func (c *Conn) LastRdyTime() time.Time { + return time.Unix(0, atomic.LoadInt64(&c.lastRdyTimestamp)) +} + +// LastMessageTime returns a time.Time representing +// the time at which the last message was received +func (c *Conn) LastMessageTime() time.Time { + return time.Unix(0, atomic.LoadInt64(&c.lastMsgTimestamp)) +} + +// RemoteAddr returns the configured destination nsqd address +func (c *Conn) RemoteAddr() net.Addr { + return c.conn.RemoteAddr() +} + +// String returns the fully-qualified address +func (c *Conn) String() string { + return c.addr +} + +// Read performs a deadlined read on the underlying TCP connection +func (c *Conn) Read(p []byte) (int, error) { + c.conn.SetReadDeadline(time.Now().Add(c.config.ReadTimeout)) + return c.r.Read(p) +} + +// Write performs a deadlined write on the underlying TCP connection +func (c *Conn) Write(p []byte) (int, error) { + c.conn.SetWriteDeadline(time.Now().Add(c.config.WriteTimeout)) + return c.w.Write(p) +} + +// WriteCommand is a 
goroutine safe method to write a Command +// to this connection, and flush. +func (c *Conn) WriteCommand(cmd *Command) error { + c.mtx.Lock() + + _, err := cmd.WriteTo(c) + if err != nil { + goto exit + } + err = c.Flush() + +exit: + c.mtx.Unlock() + if err != nil { + c.log(LogLevelError, "IO error - %s", err) + c.delegate.OnIOError(c, err) + } + return err +} + +type flusher interface { + Flush() error +} + +// Flush writes all buffered data to the underlying TCP connection +func (c *Conn) Flush() error { + if f, ok := c.w.(flusher); ok { + return f.Flush() + } + return nil +} + +func (c *Conn) identify() (*IdentifyResponse, error) { + ci := make(map[string]interface{}) + ci["client_id"] = c.config.ClientID + ci["hostname"] = c.config.Hostname + ci["user_agent"] = c.config.UserAgent + ci["short_id"] = c.config.ClientID // deprecated + ci["long_id"] = c.config.Hostname // deprecated + ci["tls_v1"] = c.config.TlsV1 + ci["deflate"] = c.config.Deflate + ci["deflate_level"] = c.config.DeflateLevel + ci["snappy"] = c.config.Snappy + ci["feature_negotiation"] = true + if c.config.HeartbeatInterval == -1 { + ci["heartbeat_interval"] = -1 + } else { + ci["heartbeat_interval"] = int64(c.config.HeartbeatInterval / time.Millisecond) + } + ci["sample_rate"] = c.config.SampleRate + ci["output_buffer_size"] = c.config.OutputBufferSize + if c.config.OutputBufferTimeout == -1 { + ci["output_buffer_timeout"] = -1 + } else { + ci["output_buffer_timeout"] = int64(c.config.OutputBufferTimeout / time.Millisecond) + } + ci["msg_timeout"] = int64(c.config.MsgTimeout / time.Millisecond) + cmd, err := Identify(ci) + if err != nil { + return nil, ErrIdentify{err.Error()} + } + + err = c.WriteCommand(cmd) + if err != nil { + return nil, ErrIdentify{err.Error()} + } + + frameType, data, err := ReadUnpackedResponse(c) + if err != nil { + return nil, ErrIdentify{err.Error()} + } + + if frameType == FrameTypeError { + return nil, ErrIdentify{string(data)} + } + + // check to see if the server was able to respond w/ capabilities + // i.e. 
it was a JSON response + if data[0] != '{' { + return nil, nil + } + + resp := &IdentifyResponse{} + err = json.Unmarshal(data, resp) + if err != nil { + return nil, ErrIdentify{err.Error()} + } + + c.log(LogLevelDebug, "IDENTIFY response: %+v", resp) + + c.maxRdyCount = resp.MaxRdyCount + + if resp.TLSv1 { + c.log(LogLevelInfo, "upgrading to TLS") + err := c.upgradeTLS(c.config.TlsConfig) + if err != nil { + return nil, ErrIdentify{err.Error()} + } + } + + if resp.Deflate { + c.log(LogLevelInfo, "upgrading to Deflate") + err := c.upgradeDeflate(c.config.DeflateLevel) + if err != nil { + return nil, ErrIdentify{err.Error()} + } + } + + if resp.Snappy { + c.log(LogLevelInfo, "upgrading to Snappy") + err := c.upgradeSnappy() + if err != nil { + return nil, ErrIdentify{err.Error()} + } + } + + // now that connection is bootstrapped, enable read buffering + // (and write buffering if it's not already capable of Flush()) + c.r = bufio.NewReader(c.r) + if _, ok := c.w.(flusher); !ok { + c.w = bufio.NewWriter(c.w) + } + + return resp, nil +} + +func (c *Conn) upgradeTLS(tlsConf *tls.Config) error { + host, _, err := net.SplitHostPort(c.addr) + if err != nil { + return err + } + + // create a local copy of the config to set ServerName for this connection + conf := &tls.Config{} + if tlsConf != nil { + conf = tlsConf.Clone() + } + conf.ServerName = host + + c.tlsConn = tls.Client(c.conn, conf) + err = c.tlsConn.Handshake() + if err != nil { + return err + } + c.r = c.tlsConn + c.w = c.tlsConn + frameType, data, err := ReadUnpackedResponse(c) + if err != nil { + return err + } + if frameType != FrameTypeResponse || !bytes.Equal(data, []byte("OK")) { + return errors.New("invalid response from TLS upgrade") + } + return nil +} + +func (c *Conn) upgradeDeflate(level int) error { + conn := net.Conn(c.conn) + if c.tlsConn != nil { + conn = c.tlsConn + } + fw, _ := flate.NewWriter(conn, level) + c.r = flate.NewReader(conn) + c.w = fw + frameType, data, err := ReadUnpackedResponse(c) + if err != nil { + return err + } + if frameType != FrameTypeResponse || !bytes.Equal(data, []byte("OK")) { + return errors.New("invalid response from Deflate upgrade") + } + return nil +} + +func (c *Conn) upgradeSnappy() error { + conn := net.Conn(c.conn) + if c.tlsConn != nil { + conn = c.tlsConn + } + c.r = snappy.NewReader(conn) + c.w = snappy.NewWriter(conn) + frameType, data, err := ReadUnpackedResponse(c) + if err != nil { + return err + } + if frameType != FrameTypeResponse || !bytes.Equal(data, []byte("OK")) { + return errors.New("invalid response from Snappy upgrade") + } + return nil +} + +func (c *Conn) auth(secret string) error { + cmd, err := Auth(secret) + if err != nil { + return err + } + + err = c.WriteCommand(cmd) + if err != nil { + return err + } + + frameType, data, err := ReadUnpackedResponse(c) + if err != nil { + return err + } + + if frameType == FrameTypeError { + return errors.New("Error authenticating " + string(data)) + } + + resp := &AuthResponse{} + err = json.Unmarshal(data, resp) + if err != nil { + return err + } + + c.log(LogLevelInfo, "Auth accepted. 
Identity: %q %s Permissions: %d",
+		resp.Identity, resp.IdentityUrl, resp.PermissionCount)
+
+	return nil
+}
+
+func (c *Conn) readLoop() {
+	delegate := &connMessageDelegate{c}
+	for {
+		if atomic.LoadInt32(&c.closeFlag) == 1 {
+			goto exit
+		}
+
+		frameType, data, err := ReadUnpackedResponse(c)
+		if err != nil {
+			if err == io.EOF && atomic.LoadInt32(&c.closeFlag) == 1 {
+				goto exit
+			}
+			if !strings.Contains(err.Error(), "use of closed network connection") {
+				c.log(LogLevelError, "IO error - %s", err)
+				c.delegate.OnIOError(c, err)
+			}
+			goto exit
+		}
+
+		if frameType == FrameTypeResponse && bytes.Equal(data, []byte("_heartbeat_")) {
+			c.log(LogLevelDebug, "heartbeat received")
+			c.delegate.OnHeartbeat(c)
+			err := c.WriteCommand(Nop())
+			if err != nil {
+				c.log(LogLevelError, "IO error - %s", err)
+				c.delegate.OnIOError(c, err)
+				goto exit
+			}
+			continue
+		}
+
+		switch frameType {
+		case FrameTypeResponse:
+			c.delegate.OnResponse(c, data)
+		case FrameTypeMessage:
+			msg, err := DecodeMessage(data)
+			if err != nil {
+				c.log(LogLevelError, "IO error - %s", err)
+				c.delegate.OnIOError(c, err)
+				goto exit
+			}
+			msg.Delegate = delegate
+			msg.NSQDAddress = c.String()
+
+			atomic.AddInt64(&c.messagesInFlight, 1)
+			atomic.StoreInt64(&c.lastMsgTimestamp, time.Now().UnixNano())
+
+			c.delegate.OnMessage(c, msg)
+		case FrameTypeError:
+			c.log(LogLevelError, "protocol error - %s", data)
+			c.delegate.OnError(c, data)
+		default:
+			// err is nil here; log the actual problem (the unexpected frame type)
+			c.log(LogLevelError, "unknown frame type %d", frameType)
+			c.delegate.OnIOError(c, fmt.Errorf("unknown frame type %d", frameType))
+		}
+	}
+
+exit:
+	atomic.StoreInt32(&c.readLoopRunning, 0)
+	// start the connection close
+	messagesInFlight := atomic.LoadInt64(&c.messagesInFlight)
+	if messagesInFlight == 0 {
+		// if we exited readLoop with no messages in flight
+		// we need to explicitly trigger the close because
+		// writeLoop won't
+		c.close()
+	} else {
+		c.log(LogLevelWarning, "delaying close, %d outstanding messages", messagesInFlight)
+	}
+	c.wg.Done()
+	c.log(LogLevelInfo, "readLoop exiting")
+}
+
+func (c *Conn) writeLoop() {
+	for {
+		select {
+		case <-c.exitChan:
+			c.log(LogLevelInfo, "breaking out of writeLoop")
+			// Indicate drainReady because we will not pull any more off msgResponseChan
+			close(c.drainReady)
+			goto exit
+		case cmd := <-c.cmdChan:
+			err := c.WriteCommand(cmd)
+			if err != nil {
+				c.log(LogLevelError, "error sending command %s - %s", cmd, err)
+				c.close()
+				continue
+			}
+		case resp := <-c.msgResponseChan:
+			// Decrement this here so it is correct even if we can't respond to nsqd
+			msgsInFlight := atomic.AddInt64(&c.messagesInFlight, -1)
+
+			if resp.success {
+				c.log(LogLevelDebug, "FIN %s", resp.msg.ID)
+				c.delegate.OnMessageFinished(c, resp.msg)
+				c.delegate.OnResume(c)
+			} else {
+				c.log(LogLevelDebug, "REQ %s", resp.msg.ID)
+				c.delegate.OnMessageRequeued(c, resp.msg)
+				if resp.backoff {
+					c.delegate.OnBackoff(c)
+				} else {
+					c.delegate.OnContinue(c)
+				}
+			}
+
+			err := c.WriteCommand(resp.cmd)
+			if err != nil {
+				c.log(LogLevelError, "error sending command %s - %s", resp.cmd, err)
+				c.close()
+				continue
+			}
+
+			if msgsInFlight == 0 &&
+				atomic.LoadInt32(&c.closeFlag) == 1 {
+				c.close()
+				continue
+			}
+		}
+	}
+
+exit:
+	c.wg.Done()
+	c.log(LogLevelInfo, "writeLoop exiting")
+}
+
+func (c *Conn) close() {
+	// a "clean" connection close is orchestrated as follows:
+	//
+	//     1. CLOSE cmd sent to nsqd
+	//     2. CLOSE_WAIT response received from nsqd
+	//     3. set c.closeFlag
+	//     4. readLoop() exits
+	//         a. if messages-in-flight > 0 delay close()
+	//             i.
writeLoop() continues receiving on c.msgResponseChan chan + // x. when messages-in-flight == 0 call close() + // b. else call close() immediately + // 5. c.exitChan close + // a. writeLoop() exits + // i. c.drainReady close + // 6a. launch cleanup() goroutine (we're racing with intraprocess + // routed messages, see comments below) + // a. wait on c.drainReady + // b. loop and receive on c.msgResponseChan chan + // until messages-in-flight == 0 + // i. ensure that readLoop has exited + // 6b. launch waitForCleanup() goroutine + // b. wait on waitgroup (covers readLoop() and writeLoop() + // and cleanup goroutine) + // c. underlying TCP connection close + // d. trigger Delegate OnClose() + // + c.stopper.Do(func() { + c.log(LogLevelInfo, "beginning close") + close(c.exitChan) + c.conn.CloseRead() + + c.wg.Add(1) + go c.cleanup() + + go c.waitForCleanup() + }) +} + +func (c *Conn) cleanup() { + <-c.drainReady + ticker := time.NewTicker(100 * time.Millisecond) + lastWarning := time.Now() + // writeLoop has exited, drain any remaining in flight messages + for { + // we're racing with readLoop which potentially has a message + // for handling so infinitely loop until messagesInFlight == 0 + // and readLoop has exited + var msgsInFlight int64 + select { + case <-c.msgResponseChan: + msgsInFlight = atomic.AddInt64(&c.messagesInFlight, -1) + case <-ticker.C: + msgsInFlight = atomic.LoadInt64(&c.messagesInFlight) + } + if msgsInFlight > 0 { + if time.Now().Sub(lastWarning) > time.Second { + c.log(LogLevelWarning, "draining... waiting for %d messages in flight", msgsInFlight) + lastWarning = time.Now() + } + continue + } + // until the readLoop has exited we cannot be sure that there + // still won't be a race + if atomic.LoadInt32(&c.readLoopRunning) == 1 { + if time.Now().Sub(lastWarning) > time.Second { + c.log(LogLevelWarning, "draining... 
readLoop still running") + lastWarning = time.Now() + } + continue + } + goto exit + } + +exit: + ticker.Stop() + c.wg.Done() + c.log(LogLevelInfo, "finished draining, cleanup exiting") +} + +func (c *Conn) waitForCleanup() { + // this blocks until readLoop and writeLoop + // (and cleanup goroutine above) have exited + c.wg.Wait() + c.conn.CloseWrite() + c.log(LogLevelInfo, "clean close complete") + c.delegate.OnClose(c) +} + +func (c *Conn) onMessageFinish(m *Message) { + c.msgResponseChan <- &msgResponse{msg: m, cmd: Finish(m.ID), success: true} +} + +func (c *Conn) onMessageRequeue(m *Message, delay time.Duration, backoff bool) { + if delay == -1 { + // linear delay + delay = c.config.DefaultRequeueDelay * time.Duration(m.Attempts) + // bound the requeueDelay to configured max + if delay > c.config.MaxRequeueDelay { + delay = c.config.MaxRequeueDelay + } + } + c.msgResponseChan <- &msgResponse{msg: m, cmd: Requeue(m.ID, delay), success: false, backoff: backoff} +} + +func (c *Conn) onMessageTouch(m *Message) { + select { + case c.cmdChan <- Touch(m.ID): + case <-c.exitChan: + } +} + +func (c *Conn) log(lvl LogLevel, line string, args ...interface{}) { + logger, logLvl, logFmt := c.getLogger(lvl) + + if logger == nil { + return + } + + if logLvl > lvl { + return + } + + logger.Output(2, fmt.Sprintf("%-4s %s %s", lvl, + fmt.Sprintf(logFmt, c.String()), + fmt.Sprintf(line, args...))) +} diff --git a/vendor/github.com/nsqio/go-nsq/consumer.go b/vendor/github.com/nsqio/go-nsq/consumer.go new file mode 100644 index 00000000..b4d7487b --- /dev/null +++ b/vendor/github.com/nsqio/go-nsq/consumer.go @@ -0,0 +1,1230 @@ +package nsq + +import ( + "bytes" + "errors" + "fmt" + "log" + "math" + "math/rand" + "net" + "net/http" + "net/url" + "os" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" +) + +// Handler is the message processing interface for Consumer +// +// Implement this interface for handlers that return whether or not message +// processing completed successfully. +// +// When the return value is nil Consumer will automatically handle FINishing. +// +// When the returned value is non-nil Consumer will automatically handle REQueing. +type Handler interface { + HandleMessage(message *Message) error +} + +// HandlerFunc is a convenience type to avoid having to declare a struct +// to implement the Handler interface, it can be used like this: +// +// consumer.AddHandler(nsq.HandlerFunc(func(m *Message) error { +// // handle the message +// })) +type HandlerFunc func(message *Message) error + +// HandleMessage implements the Handler interface +func (h HandlerFunc) HandleMessage(m *Message) error { + return h(m) +} + +// DiscoveryFilter is an interface accepted by `SetBehaviorDelegate()` +// for filtering the nsqds returned from discovery via nsqlookupd +type DiscoveryFilter interface { + Filter([]string) []string +} + +// FailedMessageLogger is an interface that can be implemented by handlers that wish +// to receive a callback when a message is deemed "failed" (i.e. 
the number of attempts +// exceeded the Consumer specified MaxAttemptCount) +type FailedMessageLogger interface { + LogFailedMessage(message *Message) +} + +// ConsumerStats represents a snapshot of the state of a Consumer's connections and the messages +// it has seen +type ConsumerStats struct { + MessagesReceived uint64 + MessagesFinished uint64 + MessagesRequeued uint64 + Connections int +} + +var instCount int64 + +type backoffSignal int + +const ( + backoffFlag backoffSignal = iota + continueFlag + resumeFlag +) + +// Consumer is a high-level type to consume from NSQ. +// +// A Consumer instance is supplied a Handler that will be executed +// concurrently via goroutines to handle processing the stream of messages +// consumed from the specified topic/channel. See: Handler/HandlerFunc +// for details on implementing the interface to create handlers. +// +// If configured, it will poll nsqlookupd instances and handle connection (and +// reconnection) to any discovered nsqds. +type Consumer struct { + // 64bit atomic vars need to be first for proper alignment on 32bit platforms + messagesReceived uint64 + messagesFinished uint64 + messagesRequeued uint64 + totalRdyCount int64 + backoffDuration int64 + backoffCounter int32 + maxInFlight int32 + + mtx sync.RWMutex + + logger []logger + logLvl LogLevel + logGuard sync.RWMutex + + behaviorDelegate interface{} + + id int64 + topic string + channel string + config Config + + rngMtx sync.Mutex + rng *rand.Rand + + needRDYRedistributed int32 + + backoffMtx sync.Mutex + + incomingMessages chan *Message + + rdyRetryMtx sync.Mutex + rdyRetryTimers map[string]*time.Timer + + pendingConnections map[string]*Conn + connections map[string]*Conn + + nsqdTCPAddrs []string + + // used at connection close to force a possible reconnect + lookupdRecheckChan chan int + lookupdHTTPAddrs []string + lookupdQueryIndex int + lookupdHttpClient *http.Client + + wg sync.WaitGroup + runningHandlers int32 + stopFlag int32 + connectedFlag int32 + stopHandler sync.Once + exitHandler sync.Once + + // read from this channel to block until consumer is cleanly stopped + StopChan chan int + exitChan chan int +} + +// NewConsumer creates a new instance of Consumer for the specified topic/channel +// +// The only valid way to create a Config is via NewConfig, using a struct literal will panic. +// After Config is passed into NewConsumer the values are no longer mutable (they are copied). 
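+//
+// An illustrative sketch (topic, channel, and the lookupd address are
+// hypothetical; handlers must be added before connecting):
+//
+//	cfg := nsq.NewConfig()
+//	consumer, err := nsq.NewConsumer("events", "archive", cfg)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	consumer.AddHandler(nsq.HandlerFunc(func(m *nsq.Message) error {
+//		// process m.Body; returning nil FINishes, non-nil REQueues
+//		return nil
+//	}))
+//	if err := consumer.ConnectToNSQLookupd("127.0.0.1:4161"); err != nil {
+//		log.Fatal(err)
+//	}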
+func NewConsumer(topic string, channel string, config *Config) (*Consumer, error) {
+	if err := config.Validate(); err != nil {
+		return nil, err
+	}
+
+	if !IsValidTopicName(topic) {
+		return nil, errors.New("invalid topic name")
+	}
+
+	if !IsValidChannelName(channel) {
+		return nil, errors.New("invalid channel name")
+	}
+
+	r := &Consumer{
+		id: atomic.AddInt64(&instCount, 1),
+
+		topic:   topic,
+		channel: channel,
+		config:  *config,
+
+		logger:      make([]logger, LogLevelMax+1),
+		logLvl:      LogLevelInfo,
+		maxInFlight: int32(config.MaxInFlight),
+
+		incomingMessages: make(chan *Message),
+
+		rdyRetryTimers:     make(map[string]*time.Timer),
+		pendingConnections: make(map[string]*Conn),
+		connections:        make(map[string]*Conn),
+
+		lookupdRecheckChan: make(chan int, 1),
+
+		rng: rand.New(rand.NewSource(time.Now().UnixNano())),
+
+		StopChan: make(chan int),
+		exitChan: make(chan int),
+	}
+
+	// Set default logger for all log levels
+	l := log.New(os.Stderr, "", log.Flags())
+	for index := range r.logger {
+		r.logger[index] = l
+	}
+
+	r.wg.Add(1)
+	go r.rdyLoop()
+	return r, nil
+}
+
+// Stats retrieves the current connection and message statistics for a Consumer
+func (r *Consumer) Stats() *ConsumerStats {
+	return &ConsumerStats{
+		MessagesReceived: atomic.LoadUint64(&r.messagesReceived),
+		MessagesFinished: atomic.LoadUint64(&r.messagesFinished),
+		MessagesRequeued: atomic.LoadUint64(&r.messagesRequeued),
+		Connections:      len(r.conns()),
+	}
+}
+
+func (r *Consumer) conns() []*Conn {
+	r.mtx.RLock()
+	conns := make([]*Conn, 0, len(r.connections))
+	for _, c := range r.connections {
+		conns = append(conns, c)
+	}
+	r.mtx.RUnlock()
+	return conns
+}
+
+// SetLogger assigns the logger to use as well as a level
+//
+// The logger parameter is an interface that requires the following
+// method to be implemented (such as the stdlib log.Logger):
+//
+//	Output(calldepth int, s string) error
+//
+func (r *Consumer) SetLogger(l logger, lvl LogLevel) {
+	r.logGuard.Lock()
+	defer r.logGuard.Unlock()
+
+	for level := range r.logger {
+		r.logger[level] = l
+	}
+	r.logLvl = lvl
+}
+
+// SetLoggerForLevel assigns the logger to use for the specified `level` only.
+func (r *Consumer) SetLoggerForLevel(l logger, lvl LogLevel) {
+	r.logGuard.Lock()
+	defer r.logGuard.Unlock()
+
+	r.logger[lvl] = l
+}
+
+// SetLoggerLevel sets the package logging level.
+func (r *Consumer) SetLoggerLevel(lvl LogLevel) {
+	r.logGuard.Lock()
+	defer r.logGuard.Unlock()
+
+	r.logLvl = lvl
+}
+
+func (r *Consumer) getLogger(lvl LogLevel) (logger, LogLevel) {
+	r.logGuard.RLock()
+	defer r.logGuard.RUnlock()
+
+	return r.logger[lvl], r.logLvl
+}
+
+func (r *Consumer) getLogLevel() LogLevel {
+	r.logGuard.RLock()
+	defer r.logGuard.RUnlock()
+
+	return r.logLvl
+}
+
+// SetBehaviorDelegate takes a type implementing one or more
+// of the following interfaces that modify the behavior
+// of the `Consumer`:
+//
+//	DiscoveryFilter
+//
+func (r *Consumer) SetBehaviorDelegate(cb interface{}) {
+	matched := false
+
+	if _, ok := cb.(DiscoveryFilter); ok {
+		matched = true
+	}
+
+	if !matched {
+		panic("behavior delegate does not have any recognized methods")
+	}
+
+	r.behaviorDelegate = cb
+}
+
+// perConnMaxInFlight calculates the per-connection max-in-flight count.
+//
+// This may change dynamically based on the number of connections to nsqd the Consumer
+// is responsible for.
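+//
+// For example, with max-in-flight 10 and 3 connections, each connection is
+// allotted RDY 3 (10/3, clamped between 1 and 10, then truncated).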
+func (r *Consumer) perConnMaxInFlight() int64 {
+	b := float64(r.getMaxInFlight())
+	s := b / float64(len(r.conns()))
+	return int64(math.Min(math.Max(1, s), b))
+}
+
+// IsStarved indicates whether any connections for this consumer are blocked on processing
+// before being able to receive more messages (i.e. RDY count of 0 and not exiting)
+func (r *Consumer) IsStarved() bool {
+	for _, conn := range r.conns() {
+		threshold := int64(float64(conn.RDY()) * 0.85)
+		inFlight := atomic.LoadInt64(&conn.messagesInFlight)
+		if inFlight >= threshold && inFlight > 0 && !conn.IsClosing() {
+			return true
+		}
+	}
+	return false
+}
+
+func (r *Consumer) getMaxInFlight() int32 {
+	return atomic.LoadInt32(&r.maxInFlight)
+}
+
+// ChangeMaxInFlight sets a new maximum number of messages this consumer instance
+// will allow in-flight, and updates all existing connections as appropriate.
+//
+// For example, ChangeMaxInFlight(0) would pause message flow
+//
+// If already connected, it updates the reader RDY state for each connection.
+func (r *Consumer) ChangeMaxInFlight(maxInFlight int) {
+	if r.getMaxInFlight() == int32(maxInFlight) {
+		return
+	}
+
+	atomic.StoreInt32(&r.maxInFlight, int32(maxInFlight))
+
+	for _, c := range r.conns() {
+		r.maybeUpdateRDY(c)
+	}
+}
+
+// SetLookupdHttpClient sets the HTTP client used for querying nsqlookupd
+func (r *Consumer) SetLookupdHttpClient(httpclient *http.Client) {
+	r.lookupdHttpClient = httpclient
+}
+
+// ConnectToNSQLookupd adds an nsqlookupd address to the list for this Consumer instance.
+//
+// If it is the first to be added, it initiates an HTTP request to discover nsqd
+// producers for the configured topic.
+//
+// A goroutine is spawned to handle continual polling.
+func (r *Consumer) ConnectToNSQLookupd(addr string) error {
+	if atomic.LoadInt32(&r.stopFlag) == 1 {
+		return errors.New("consumer stopped")
+	}
+	if atomic.LoadInt32(&r.runningHandlers) == 0 {
+		return errors.New("no handlers")
+	}
+
+	parsedAddr, err := buildLookupAddr(addr, r.topic)
+	if err != nil {
+		return err
+	}
+
+	atomic.StoreInt32(&r.connectedFlag, 1)
+
+	r.mtx.Lock()
+	for _, x := range r.lookupdHTTPAddrs {
+		if x == parsedAddr {
+			r.mtx.Unlock()
+			return nil
+		}
+	}
+	r.lookupdHTTPAddrs = append(r.lookupdHTTPAddrs, parsedAddr)
+	if r.lookupdHttpClient == nil {
+		transport := &http.Transport{
+			DialContext: (&net.Dialer{
+				Timeout:   r.config.LookupdPollTimeout,
+				KeepAlive: 30 * time.Second,
+			}).DialContext,
+			ResponseHeaderTimeout: r.config.LookupdPollTimeout,
+			MaxIdleConns:          100,
+			IdleConnTimeout:       90 * time.Second,
+			TLSHandshakeTimeout:   10 * time.Second,
+		}
+		r.lookupdHttpClient = &http.Client{
+			Transport: transport,
+			Timeout:   r.config.LookupdPollTimeout,
+		}
+	}
+
+	numLookupd := len(r.lookupdHTTPAddrs)
+	r.mtx.Unlock()
+
+	// if this is the first one, kick off the go loop
+	if numLookupd == 1 {
+		r.queryLookupd()
+		r.wg.Add(1)
+		go r.lookupdLoop()
+	}
+
+	return nil
+}
+
+// ConnectToNSQLookupds adds multiple nsqlookupd addresses to the list for this Consumer instance.
+//
+// If adding the first address it initiates an HTTP request to discover nsqd
+// producers for the configured topic.
+//
+// A goroutine is spawned to handle continual polling.
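+//
+// For example (the addresses are hypothetical):
+//
+//	err := consumer.ConnectToNSQLookupds([]string{
+//		"10.0.0.1:4161",
+//		"10.0.0.2:4161",
+//	})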
+func (r *Consumer) ConnectToNSQLookupds(addresses []string) error {
+	for _, addr := range addresses {
+		err := r.ConnectToNSQLookupd(addr)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// poll all known lookup servers every LookupdPollInterval
+func (r *Consumer) lookupdLoop() {
+	// add some jitter so that multiple consumers discovering the same topic,
+	// when restarted at the same time, don't all connect at once.
+	r.rngMtx.Lock()
+	jitter := time.Duration(int64(r.rng.Float64() *
+		r.config.LookupdPollJitter * float64(r.config.LookupdPollInterval)))
+	r.rngMtx.Unlock()
+	var ticker *time.Ticker
+
+	select {
+	case <-time.After(jitter):
+	case <-r.exitChan:
+		goto exit
+	}
+
+	ticker = time.NewTicker(r.config.LookupdPollInterval)
+
+	for {
+		select {
+		case <-ticker.C:
+			r.queryLookupd()
+		case <-r.lookupdRecheckChan:
+			r.queryLookupd()
+		case <-r.exitChan:
+			goto exit
+		}
+	}
+
+exit:
+	if ticker != nil {
+		ticker.Stop()
+	}
+	r.log(LogLevelInfo, "exiting lookupdLoop")
+	r.wg.Done()
+}
+
+// return the next lookupd endpoint to query
+// keeping track of which one was last used
+func (r *Consumer) nextLookupdEndpoint() string {
+	r.mtx.RLock()
+	if r.lookupdQueryIndex >= len(r.lookupdHTTPAddrs) {
+		r.lookupdQueryIndex = 0
+	}
+	addr := r.lookupdHTTPAddrs[r.lookupdQueryIndex]
+	num := len(r.lookupdHTTPAddrs)
+	r.mtx.RUnlock()
+	r.lookupdQueryIndex = (r.lookupdQueryIndex + 1) % num
+
+	return addr
+}
+
+type lookupResp struct {
+	Channels  []string    `json:"channels"`
+	Producers []*peerInfo `json:"producers"`
+	Timestamp int64       `json:"timestamp"`
+}
+
+type peerInfo struct {
+	RemoteAddress    string `json:"remote_address"`
+	Hostname         string `json:"hostname"`
+	BroadcastAddress string `json:"broadcast_address"`
+	TCPPort          int    `json:"tcp_port"`
+	HTTPPort         int    `json:"http_port"`
+	Version          string `json:"version"`
+}
+
+// make an HTTP req to one of the configured nsqlookupd instances to discover
+// which nsqds provide the topic we are consuming.
+//
+// initiate a connection to any new producers that are identified.
+func (r *Consumer) queryLookupd() {
+	retries := 0
+
+retry:
+	endpoint := r.nextLookupdEndpoint()
+
+	r.log(LogLevelInfo, "querying nsqlookupd %s", endpoint)
+
+	var data lookupResp
+	headers := make(http.Header)
+	if r.config.AuthSecret != "" && r.config.LookupdAuthorization {
+		headers.Set("Authorization", fmt.Sprintf("Bearer %s", r.config.AuthSecret))
+	}
+	err := apiRequestNegotiateV1(r.lookupdHttpClient, "GET", endpoint, headers, &data)
+	if err != nil {
+		r.log(LogLevelError, "error querying nsqlookupd (%s) - %s", endpoint, err)
+		retries++
+		if retries < 3 {
+			r.log(LogLevelInfo, "retrying with next nsqlookupd")
+			goto retry
+		}
+		return
+	}
+
+	var nsqdAddrs []string
+	for _, producer := range data.Producers {
+		broadcastAddress := producer.BroadcastAddress
+		port := producer.TCPPort
+		joined := net.JoinHostPort(broadcastAddress, strconv.Itoa(port))
+		nsqdAddrs = append(nsqdAddrs, joined)
+	}
+	// apply filter
+	if discoveryFilter, ok := r.behaviorDelegate.(DiscoveryFilter); ok {
+		nsqdAddrs = discoveryFilter.Filter(nsqdAddrs)
+	}
+	for _, addr := range nsqdAddrs {
+		err = r.ConnectToNSQD(addr)
+		if err != nil && err != ErrAlreadyConnected {
+			r.log(LogLevelError, "(%s) error connecting to nsqd - %s", addr, err)
+			continue
+		}
+	}
+}
+// ConnectToNSQDs takes multiple nsqd addresses to connect directly to.
+//
+// It is recommended to use ConnectToNSQLookupd so that topics are discovered
+// automatically. This method is useful when you want to connect to local instances.
+func (r *Consumer) ConnectToNSQDs(addresses []string) error {
+	for _, addr := range addresses {
+		err := r.ConnectToNSQD(addr)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// ConnectToNSQD takes an nsqd address to connect directly to.
+//
+// It is recommended to use ConnectToNSQLookupd so that topics are discovered
+// automatically. This method is useful when you want to connect to a single, local,
+// instance.
+func (r *Consumer) ConnectToNSQD(addr string) error {
+	if atomic.LoadInt32(&r.stopFlag) == 1 {
+		return errors.New("consumer stopped")
+	}
+
+	if atomic.LoadInt32(&r.runningHandlers) == 0 {
+		return errors.New("no handlers")
+	}
+
+	atomic.StoreInt32(&r.connectedFlag, 1)
+
+	conn := NewConn(addr, &r.config, &consumerConnDelegate{r})
+	conn.SetLoggerLevel(r.getLogLevel())
+	format := fmt.Sprintf("%3d [%s/%s] (%%s)", r.id, r.topic, r.channel)
+	for index := range r.logger {
+		conn.SetLoggerForLevel(r.logger[index], LogLevel(index), format)
+	}
+	r.mtx.Lock()
+	_, pendingOk := r.pendingConnections[addr]
+	_, ok := r.connections[addr]
+	if ok || pendingOk {
+		r.mtx.Unlock()
+		return ErrAlreadyConnected
+	}
+	r.pendingConnections[addr] = conn
+	if idx := indexOf(addr, r.nsqdTCPAddrs); idx == -1 {
+		r.nsqdTCPAddrs = append(r.nsqdTCPAddrs, addr)
+	}
+	r.mtx.Unlock()
+
+	r.log(LogLevelInfo, "(%s) connecting to nsqd", addr)
+
+	cleanupConnection := func() {
+		r.mtx.Lock()
+		delete(r.pendingConnections, addr)
+		r.mtx.Unlock()
+		conn.Close()
+	}
+
+	resp, err := conn.Connect()
+	if err != nil {
+		cleanupConnection()
+		return err
+	}
+
+	if resp != nil {
+		if resp.MaxRdyCount < int64(r.getMaxInFlight()) {
+			r.log(LogLevelWarning,
+				"(%s) max RDY count %d < consumer max in flight %d, truncation possible",
+				conn.String(), resp.MaxRdyCount, r.getMaxInFlight())
+		}
+	}
+
+	cmd := Subscribe(r.topic, r.channel)
+	err = conn.WriteCommand(cmd)
+	if err != nil {
+		cleanupConnection()
+		return fmt.Errorf("[%s] failed to subscribe to %s:%s - %s",
+			conn, r.topic, r.channel, err.Error())
+	}
+
+	r.mtx.Lock()
+	delete(r.pendingConnections, addr)
+	r.connections[addr] = conn
+	r.mtx.Unlock()
+
+	// pre-emptive signal to existing connections to lower their RDY count
+	for _, c := range r.conns() {
+		r.maybeUpdateRDY(c)
+	}
+
+	return nil
+}
+
+func indexOf(n string, h []string) int {
+	for i, a := range h {
+		if n == a {
+			return i
+		}
+	}
+	return -1
+}
+
+// DisconnectFromNSQD closes the connection to and removes the specified
+// `nsqd` address from the list
+func (r *Consumer) DisconnectFromNSQD(addr string) error {
+	r.mtx.Lock()
+	defer r.mtx.Unlock()
+
+	idx := indexOf(addr, r.nsqdTCPAddrs)
+	if idx == -1 {
+		return ErrNotConnected
+	}
+
+	// slice delete
+	r.nsqdTCPAddrs = append(r.nsqdTCPAddrs[:idx], r.nsqdTCPAddrs[idx+1:]...)
+
+	pendingConn, pendingOk := r.pendingConnections[addr]
+	conn, ok := r.connections[addr]
+
+	if ok {
+		conn.Close()
+	} else if pendingOk {
+		pendingConn.Close()
+	}
+
+	return nil
+}
+
+// DisconnectFromNSQLookupd removes the specified `nsqlookupd` address
+// from the list used for periodic discovery.
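+//
+// The address is normalized via the same rules as ConnectToNSQLookupd, so
+// pass the same form that was used when connecting, e.g. (hypothetical):
+//
+//	err := consumer.DisconnectFromNSQLookupd("10.0.0.1:4161")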
+func (r *Consumer) DisconnectFromNSQLookupd(addr string) error {
+	parsedAddr, err := buildLookupAddr(addr, r.topic)
+	if err != nil {
+		return err
+	}
+
+	r.mtx.Lock()
+	defer r.mtx.Unlock()
+
+	idx := indexOf(parsedAddr, r.lookupdHTTPAddrs)
+	if idx == -1 {
+		return ErrNotConnected
+	}
+
+	if len(r.lookupdHTTPAddrs) == 1 {
+		return fmt.Errorf("cannot disconnect from only remaining nsqlookupd HTTP address %s", addr)
+	}
+
+	r.lookupdHTTPAddrs = append(r.lookupdHTTPAddrs[:idx], r.lookupdHTTPAddrs[idx+1:]...)
+
+	return nil
+}
+
+func (r *Consumer) onConnMessage(c *Conn, msg *Message) {
+	atomic.AddUint64(&r.messagesReceived, 1)
+	r.incomingMessages <- msg
+}
+
+func (r *Consumer) onConnMessageFinished(c *Conn, msg *Message) {
+	atomic.AddUint64(&r.messagesFinished, 1)
+}
+
+func (r *Consumer) onConnMessageRequeued(c *Conn, msg *Message) {
+	atomic.AddUint64(&r.messagesRequeued, 1)
+}
+
+func (r *Consumer) onConnBackoff(c *Conn) {
+	r.startStopContinueBackoff(c, backoffFlag)
+}
+
+func (r *Consumer) onConnContinue(c *Conn) {
+	r.startStopContinueBackoff(c, continueFlag)
+}
+
+func (r *Consumer) onConnResume(c *Conn) {
+	r.startStopContinueBackoff(c, resumeFlag)
+}
+
+func (r *Consumer) onConnResponse(c *Conn, data []byte) {
+	switch {
+	case bytes.Equal(data, []byte("CLOSE_WAIT")):
+		// server is ready for us to close (it ack'd our StartClose)
+		// we can assume we will not receive any more messages over this channel
+		// (but we can still write back responses)
+		r.log(LogLevelInfo, "(%s) received CLOSE_WAIT from nsqd", c.String())
+		c.Close()
+	}
+}
+
+func (r *Consumer) onConnError(c *Conn, data []byte) {}
+
+func (r *Consumer) onConnHeartbeat(c *Conn) {}
+
+func (r *Consumer) onConnIOError(c *Conn, err error) {
+	c.Close()
+}
+
+func (r *Consumer) onConnClose(c *Conn) {
+	var hasRDYRetryTimer bool
+
+	// remove this connection's RDY count from the consumer's total
+	rdyCount := c.RDY()
+	atomic.AddInt64(&r.totalRdyCount, -rdyCount)
+
+	r.rdyRetryMtx.Lock()
+	if timer, ok := r.rdyRetryTimers[c.String()]; ok {
+		// stop any pending retry of an old RDY update
+		timer.Stop()
+		delete(r.rdyRetryTimers, c.String())
+		hasRDYRetryTimer = true
+	}
+	r.rdyRetryMtx.Unlock()
+
+	r.mtx.Lock()
+	delete(r.connections, c.String())
+	left := len(r.connections)
+	r.mtx.Unlock()
+
+	r.log(LogLevelWarning, "there are %d connections left alive", left)
+
+	if (hasRDYRetryTimer || rdyCount > 0) &&
+		(int32(left) == r.getMaxInFlight() || r.inBackoff()) {
+		// we're toggling out of (normal) redistribution cases and this conn
+		// had a RDY count...
+		//
+		// trigger RDY redistribution to make sure this RDY is moved
+		// to a new connection
+		atomic.StoreInt32(&r.needRDYRedistributed, 1)
+	}
+
+	// we were the last one (and stopping)
+	if atomic.LoadInt32(&r.stopFlag) == 1 {
+		if left == 0 {
+			r.stopHandlers()
+		}
+		return
+	}
+
+	r.mtx.RLock()
+	numLookupd := len(r.lookupdHTTPAddrs)
+	reconnect := indexOf(c.String(), r.nsqdTCPAddrs) >= 0
+	r.mtx.RUnlock()
+	if numLookupd > 0 {
+		// trigger a poll of the lookupd
+		select {
+		case r.lookupdRecheckChan <- 1:
+		default:
+		}
+	} else if reconnect {
+		// there are no lookupds and we still have this nsqd TCP address in our list...
+		// try to reconnect after a bit
+		go func(addr string) {
+			for {
+				r.log(LogLevelInfo, "(%s) re-connecting in %s", addr, r.config.LookupdPollInterval)
+				time.Sleep(r.config.LookupdPollInterval)
+				if atomic.LoadInt32(&r.stopFlag) == 1 {
+					break
+				}
+				r.mtx.RLock()
+				reconnect := indexOf(addr, r.nsqdTCPAddrs) >= 0
+				r.mtx.RUnlock()
+				if !reconnect {
+					r.log(LogLevelWarning, "(%s) skipped reconnect after removal...", addr)
+					return
+				}
+				err := r.ConnectToNSQD(addr)
+				if err != nil && err != ErrAlreadyConnected {
+					r.log(LogLevelError, "(%s) error connecting to nsqd - %s", addr, err)
+					continue
+				}
+				break
+			}
+		}(c.String())
+	}
+}
+
+func (r *Consumer) startStopContinueBackoff(conn *Conn, signal backoffSignal) {
+	// prevent many async failures/successes from immediately resulting in
+	// max backoff/normal rate (by ensuring that we don't continually incr/decr
+	// the counter during a backoff period)
+	r.backoffMtx.Lock()
+	defer r.backoffMtx.Unlock()
+	if r.inBackoffTimeout() {
+		return
+	}
+
+	// update backoff state
+	backoffUpdated := false
+	backoffCounter := atomic.LoadInt32(&r.backoffCounter)
+	switch signal {
+	case resumeFlag:
+		if backoffCounter > 0 {
+			backoffCounter--
+			backoffUpdated = true
+		}
+	case backoffFlag:
+		nextBackoff := r.config.BackoffStrategy.Calculate(int(backoffCounter) + 1)
+		if nextBackoff <= r.config.MaxBackoffDuration {
+			backoffCounter++
+			backoffUpdated = true
+		}
+	}
+	atomic.StoreInt32(&r.backoffCounter, backoffCounter)
+
+	if r.backoffCounter == 0 && backoffUpdated {
+		// exit backoff
+		count := r.perConnMaxInFlight()
+		r.log(LogLevelWarning, "exiting backoff, returning all to RDY %d", count)
+		for _, c := range r.conns() {
+			r.updateRDY(c, count)
+		}
+	} else if r.backoffCounter > 0 {
+		// start or continue backoff
+		backoffDuration := r.config.BackoffStrategy.Calculate(int(backoffCounter))
+
+		if backoffDuration > r.config.MaxBackoffDuration {
+			backoffDuration = r.config.MaxBackoffDuration
+		}
+
+		r.log(LogLevelWarning, "backing off for %s (backoff level %d), setting all to RDY 0",
+			backoffDuration, backoffCounter)
+
+		// send RDY 0 immediately (to *all* connections)
+		for _, c := range r.conns() {
+			r.updateRDY(c, 0)
+		}
+
+		r.backoff(backoffDuration)
+	}
+}
+
+func (r *Consumer) backoff(d time.Duration) {
+	atomic.StoreInt64(&r.backoffDuration, d.Nanoseconds())
+	time.AfterFunc(d, r.resume)
+}
+
+func (r *Consumer) resume() {
+	if atomic.LoadInt32(&r.stopFlag) == 1 {
+		atomic.StoreInt64(&r.backoffDuration, 0)
+		return
+	}
+
+	// pick a random connection to test the waters
+	conns := r.conns()
+	if len(conns) == 0 {
+		r.log(LogLevelWarning, "no connection available to resume")
+		r.log(LogLevelWarning, "backing off for %s", time.Second)
+		r.backoff(time.Second)
+		return
+	}
+	r.rngMtx.Lock()
+	idx := r.rng.Intn(len(conns))
+	r.rngMtx.Unlock()
+	choice := conns[idx]
+
+	r.log(LogLevelWarning,
+		"(%s) backoff timeout expired, sending RDY 1",
+		choice.String())
+
+	// while in backoff only ever let 1 message at a time through
+	err := r.updateRDY(choice, 1)
+	if err != nil {
+		r.log(LogLevelWarning, "(%s) error resuming RDY 1 - %s", choice.String(), err)
+		r.log(LogLevelWarning, "backing off for %s", time.Second)
+		r.backoff(time.Second)
+		return
+	}
+
+	atomic.StoreInt64(&r.backoffDuration, 0)
+}
+
+func (r *Consumer) inBackoff() bool {
+	return atomic.LoadInt32(&r.backoffCounter) > 0
+}
+
+func (r *Consumer) inBackoffTimeout() bool {
+	return atomic.LoadInt64(&r.backoffDuration) > 0
+}
+
+func (r *Consumer) maybeUpdateRDY(conn *Conn) {
+	inBackoff
:= r.inBackoff() + inBackoffTimeout := r.inBackoffTimeout() + if inBackoff || inBackoffTimeout { + r.log(LogLevelDebug, "(%s) skip sending RDY inBackoff:%v || inBackoffTimeout:%v", + conn, inBackoff, inBackoffTimeout) + return + } + + count := r.perConnMaxInFlight() + r.log(LogLevelDebug, "(%s) sending RDY %d", conn, count) + r.updateRDY(conn, count) +} + +func (r *Consumer) rdyLoop() { + redistributeTicker := time.NewTicker(r.config.RDYRedistributeInterval) + + for { + select { + case <-redistributeTicker.C: + r.redistributeRDY() + case <-r.exitChan: + goto exit + } + } + +exit: + redistributeTicker.Stop() + r.log(LogLevelInfo, "rdyLoop exiting") + r.wg.Done() +} + +func (r *Consumer) updateRDY(c *Conn, count int64) error { + if c.IsClosing() { + return ErrClosing + } + + // never exceed the nsqd's configured max RDY count + if count > c.MaxRDY() { + count = c.MaxRDY() + } + + // stop any pending retry of an old RDY update + r.rdyRetryMtx.Lock() + if timer, ok := r.rdyRetryTimers[c.String()]; ok { + timer.Stop() + delete(r.rdyRetryTimers, c.String()) + } + r.rdyRetryMtx.Unlock() + + // never exceed our global max in flight. truncate if possible. + // this could help a new connection get partial max-in-flight + rdyCount := c.RDY() + maxPossibleRdy := int64(r.getMaxInFlight()) - atomic.LoadInt64(&r.totalRdyCount) + rdyCount + if maxPossibleRdy > 0 && maxPossibleRdy < count { + count = maxPossibleRdy + } + if maxPossibleRdy <= 0 && count > 0 { + if rdyCount == 0 { + // we wanted to exit a zero RDY count but we couldn't send it... + // in order to prevent eternal starvation we reschedule this attempt + // (if any other RDY update succeeds this timer will be stopped) + r.rdyRetryMtx.Lock() + r.rdyRetryTimers[c.String()] = time.AfterFunc(5*time.Second, + func() { + r.updateRDY(c, count) + }) + r.rdyRetryMtx.Unlock() + } + return ErrOverMaxInFlight + } + + return r.sendRDY(c, count) +} + +func (r *Consumer) sendRDY(c *Conn, count int64) error { + if count == 0 && c.LastRDY() == 0 { + // no need to send. 
It's already that RDY count + return nil + } + + atomic.AddInt64(&r.totalRdyCount, count-c.RDY()) + + lastRDY := c.LastRDY() + c.SetRDY(count) + if count == lastRDY { + return nil + } + + err := c.WriteCommand(Ready(int(count))) + if err != nil { + r.log(LogLevelError, "(%s) error sending RDY %d - %s", c.String(), count, err) + return err + } + return nil +} + +func (r *Consumer) redistributeRDY() { + if r.inBackoffTimeout() { + return + } + + // if an external heuristic set needRDYRedistributed we want to wait + // until we can actually redistribute to proceed + conns := r.conns() + if len(conns) == 0 { + return + } + + maxInFlight := r.getMaxInFlight() + if len(conns) > int(maxInFlight) { + r.log(LogLevelDebug, "redistributing RDY state (%d conns > %d max_in_flight)", + len(conns), maxInFlight) + atomic.StoreInt32(&r.needRDYRedistributed, 1) + } + + if r.inBackoff() && len(conns) > 1 { + r.log(LogLevelDebug, "redistributing RDY state (in backoff and %d conns > 1)", len(conns)) + atomic.StoreInt32(&r.needRDYRedistributed, 1) + } + + if !atomic.CompareAndSwapInt32(&r.needRDYRedistributed, 1, 0) { + return + } + + possibleConns := make([]*Conn, 0, len(conns)) + for _, c := range conns { + lastMsgDuration := time.Now().Sub(c.LastMessageTime()) + lastRdyDuration := time.Now().Sub(c.LastRdyTime()) + rdyCount := c.RDY() + r.log(LogLevelDebug, "(%s) rdy: %d (last message received %s)", + c.String(), rdyCount, lastMsgDuration) + if rdyCount > 0 { + if lastMsgDuration > r.config.LowRdyIdleTimeout { + r.log(LogLevelDebug, "(%s) idle connection, giving up RDY", c.String()) + r.updateRDY(c, 0) + } else if lastRdyDuration > r.config.LowRdyTimeout { + r.log(LogLevelDebug, "(%s) RDY timeout, giving up RDY", c.String()) + r.updateRDY(c, 0) + } + } + possibleConns = append(possibleConns, c) + } + + availableMaxInFlight := int64(maxInFlight) - atomic.LoadInt64(&r.totalRdyCount) + if r.inBackoff() { + availableMaxInFlight = 1 - atomic.LoadInt64(&r.totalRdyCount) + } + + for len(possibleConns) > 0 && availableMaxInFlight > 0 { + availableMaxInFlight-- + r.rngMtx.Lock() + i := r.rng.Int() % len(possibleConns) + r.rngMtx.Unlock() + c := possibleConns[i] + // delete + possibleConns = append(possibleConns[:i], possibleConns[i+1:]...) + r.log(LogLevelDebug, "(%s) redistributing RDY", c.String()) + r.updateRDY(c, 1) + } +} + +// Stop will initiate a graceful stop of the Consumer (permanent) +// +// NOTE: receive on StopChan to block until this process completes +func (r *Consumer) Stop() { + if !atomic.CompareAndSwapInt32(&r.stopFlag, 0, 1) { + return + } + + r.log(LogLevelInfo, "stopping...") + + if len(r.conns()) == 0 { + r.stopHandlers() + } else { + for _, c := range r.conns() { + err := c.WriteCommand(StartClose()) + if err != nil { + r.log(LogLevelError, "(%s) error sending CLS - %s", c.String(), err) + } + } + + time.AfterFunc(time.Second*30, func() { + // if we've waited this long handlers are blocked on processing messages + // so we can't just stopHandlers (if any adtl. messages were pending processing + // we would cause a panic on channel close) + // + // instead, we just bypass handler closing and skip to the final exit + r.exit() + }) + } +} + +func (r *Consumer) stopHandlers() { + r.stopHandler.Do(func() { + r.log(LogLevelInfo, "stopping handlers") + close(r.incomingMessages) + }) +} + +// AddHandler sets the Handler for messages received by this Consumer. This can be called +// multiple times to add additional handlers. Handler will have a 1:1 ratio to message handling goroutines. 
+// +// This panics if called after connecting to NSQD or NSQ Lookupd +// +// (see Handler or HandlerFunc for details on implementing this interface) +func (r *Consumer) AddHandler(handler Handler) { + r.AddConcurrentHandlers(handler, 1) +} + +// AddConcurrentHandlers sets the Handler for messages received by this Consumer. It +// takes a second argument which indicates the number of goroutines to spawn for +// message handling. +// +// This panics if called after connecting to NSQD or NSQ Lookupd +// +// (see Handler or HandlerFunc for details on implementing this interface) +func (r *Consumer) AddConcurrentHandlers(handler Handler, concurrency int) { + if atomic.LoadInt32(&r.connectedFlag) == 1 { + panic("already connected") + } + + atomic.AddInt32(&r.runningHandlers, int32(concurrency)) + for i := 0; i < concurrency; i++ { + go r.handlerLoop(handler) + } +} + +func (r *Consumer) handlerLoop(handler Handler) { + r.log(LogLevelDebug, "starting Handler") + + for { + message, ok := <-r.incomingMessages + if !ok { + goto exit + } + + if r.shouldFailMessage(message, handler) { + message.Finish() + continue + } + + err := handler.HandleMessage(message) + if err != nil { + r.log(LogLevelError, "Handler returned error (%s) for msg %s", err, message.ID) + if !message.IsAutoResponseDisabled() { + message.Requeue(-1) + } + continue + } + + if !message.IsAutoResponseDisabled() { + message.Finish() + } + } + +exit: + r.log(LogLevelDebug, "stopping Handler") + if atomic.AddInt32(&r.runningHandlers, -1) == 0 { + r.exit() + } +} + +func (r *Consumer) shouldFailMessage(message *Message, handler interface{}) bool { + // message passed the max number of attempts + if r.config.MaxAttempts > 0 && message.Attempts > r.config.MaxAttempts { + r.log(LogLevelWarning, "msg %s attempted %d times, giving up", + message.ID, message.Attempts) + + logger, ok := handler.(FailedMessageLogger) + if ok { + logger.LogFailedMessage(message) + } + + return true + } + return false +} + +func (r *Consumer) exit() { + r.exitHandler.Do(func() { + close(r.exitChan) + r.wg.Wait() + close(r.StopChan) + }) +} + +func (r *Consumer) log(lvl LogLevel, line string, args ...interface{}) { + logger, logLvl := r.getLogger(lvl) + + if logger == nil { + return + } + + if logLvl > lvl { + return + } + + logger.Output(2, fmt.Sprintf("%-4s %3d [%s/%s] %s", + lvl, r.id, r.topic, r.channel, + fmt.Sprintf(line, args...))) +} + +func buildLookupAddr(addr, topic string) (string, error) { + urlString := addr + if !strings.Contains(urlString, "://") { + urlString = "http://" + addr + } + + u, err := url.Parse(urlString) + if err != nil { + return "", err + } + + if u.Port() == "" { + return "", errors.New("missing port") + } + + if u.Path == "/" || u.Path == "" { + u.Path = "/lookup" + } + + v, err := url.ParseQuery(u.RawQuery) + v.Add("topic", topic) + u.RawQuery = v.Encode() + return u.String(), nil +} diff --git a/vendor/github.com/nsqio/go-nsq/delegates.go b/vendor/github.com/nsqio/go-nsq/delegates.go new file mode 100644 index 00000000..aca72529 --- /dev/null +++ b/vendor/github.com/nsqio/go-nsq/delegates.go @@ -0,0 +1,139 @@ +package nsq + +import "time" + +type logger interface { + Output(calldepth int, s string) error +} + +// LogLevel specifies the severity of a given log message +type LogLevel int + +// Log levels +const ( + LogLevelDebug LogLevel = iota + LogLevelInfo + LogLevelWarning + LogLevelError + LogLevelMax = iota - 1 // convenience - match highest log level +) + +// String returns the string form for a given LogLevel +func (lvl 
LogLevel) String() string { + switch lvl { + case LogLevelInfo: + return "INF" + case LogLevelWarning: + return "WRN" + case LogLevelError: + return "ERR" + } + return "DBG" +} + +// MessageDelegate is an interface of methods that are used as +// callbacks in Message +type MessageDelegate interface { + // OnFinish is called when the Finish() method + // is triggered on the Message + OnFinish(*Message) + + // OnRequeue is called when the Requeue() method + // is triggered on the Message + OnRequeue(m *Message, delay time.Duration, backoff bool) + + // OnTouch is called when the Touch() method + // is triggered on the Message + OnTouch(*Message) +} + +type connMessageDelegate struct { + c *Conn +} + +func (d *connMessageDelegate) OnFinish(m *Message) { d.c.onMessageFinish(m) } +func (d *connMessageDelegate) OnRequeue(m *Message, t time.Duration, b bool) { + d.c.onMessageRequeue(m, t, b) +} +func (d *connMessageDelegate) OnTouch(m *Message) { d.c.onMessageTouch(m) } + +// ConnDelegate is an interface of methods that are used as +// callbacks in Conn +type ConnDelegate interface { + // OnResponse is called when the connection + // receives a FrameTypeResponse from nsqd + OnResponse(*Conn, []byte) + + // OnError is called when the connection + // receives a FrameTypeError from nsqd + OnError(*Conn, []byte) + + // OnMessage is called when the connection + // receives a FrameTypeMessage from nsqd + OnMessage(*Conn, *Message) + + // OnMessageFinished is called when the connection + // handles a FIN command from a message handler + OnMessageFinished(*Conn, *Message) + + // OnMessageRequeued is called when the connection + // handles a REQ command from a message handler + OnMessageRequeued(*Conn, *Message) + + // OnBackoff is called when the connection triggers a backoff state + OnBackoff(*Conn) + + // OnContinue is called when the connection finishes a message without adjusting backoff state + OnContinue(*Conn) + + // OnResume is called when the connection triggers a resume state + OnResume(*Conn) + + // OnIOError is called when the connection experiences + // a low-level TCP transport error + OnIOError(*Conn, error) + + // OnHeartbeat is called when the connection + // receives a heartbeat from nsqd + OnHeartbeat(*Conn) + + // OnClose is called when the connection + // closes, after all cleanup + OnClose(*Conn) +} + +// keeps the exported Consumer struct clean of the exported methods +// required to implement the ConnDelegate interface +type consumerConnDelegate struct { + r *Consumer +} + +func (d *consumerConnDelegate) OnResponse(c *Conn, data []byte) { d.r.onConnResponse(c, data) } +func (d *consumerConnDelegate) OnError(c *Conn, data []byte) { d.r.onConnError(c, data) } +func (d *consumerConnDelegate) OnMessage(c *Conn, m *Message) { d.r.onConnMessage(c, m) } +func (d *consumerConnDelegate) OnMessageFinished(c *Conn, m *Message) { d.r.onConnMessageFinished(c, m) } +func (d *consumerConnDelegate) OnMessageRequeued(c *Conn, m *Message) { d.r.onConnMessageRequeued(c, m) } +func (d *consumerConnDelegate) OnBackoff(c *Conn) { d.r.onConnBackoff(c) } +func (d *consumerConnDelegate) OnContinue(c *Conn) { d.r.onConnContinue(c) } +func (d *consumerConnDelegate) OnResume(c *Conn) { d.r.onConnResume(c) } +func (d *consumerConnDelegate) OnIOError(c *Conn, err error) { d.r.onConnIOError(c, err) } +func (d *consumerConnDelegate) OnHeartbeat(c *Conn) { d.r.onConnHeartbeat(c) } +func (d *consumerConnDelegate) OnClose(c *Conn) { d.r.onConnClose(c) } + +// keeps the exported Producer struct clean of the exported 
methods
+// required to implement the ConnDelegate interface
+type producerConnDelegate struct {
+	w *Producer
+}
+
+func (d *producerConnDelegate) OnResponse(c *Conn, data []byte)       { d.w.onConnResponse(c, data) }
+func (d *producerConnDelegate) OnError(c *Conn, data []byte)          { d.w.onConnError(c, data) }
+func (d *producerConnDelegate) OnMessage(c *Conn, m *Message)         {}
+func (d *producerConnDelegate) OnMessageFinished(c *Conn, m *Message) {}
+func (d *producerConnDelegate) OnMessageRequeued(c *Conn, m *Message) {}
+func (d *producerConnDelegate) OnBackoff(c *Conn)                     {}
+func (d *producerConnDelegate) OnContinue(c *Conn)                    {}
+func (d *producerConnDelegate) OnResume(c *Conn)                      {}
+func (d *producerConnDelegate) OnIOError(c *Conn, err error)          { d.w.onConnIOError(c, err) }
+func (d *producerConnDelegate) OnHeartbeat(c *Conn)                   { d.w.onConnHeartbeat(c) }
+func (d *producerConnDelegate) OnClose(c *Conn)                       { d.w.onConnClose(c) }
diff --git a/vendor/github.com/nsqio/go-nsq/doc.go b/vendor/github.com/nsqio/go-nsq/doc.go
new file mode 100644
index 00000000..85cc30c6
--- /dev/null
+++ b/vendor/github.com/nsqio/go-nsq/doc.go
@@ -0,0 +1,90 @@
+/*
+Package nsq is the official Go package for NSQ (http://nsq.io/).
+
+It provides high-level Consumer and Producer types as well as low-level
+functions to communicate over the NSQ protocol.
+
+Consumer
+
+Consuming messages from NSQ can be done by creating an instance of a Consumer and supplying it a handler.
+
+	package main
+	import (
+		"log"
+		"os"
+		"os/signal"
+		"syscall"
+		"github.com/nsqio/go-nsq"
+	)
+
+	type myMessageHandler struct {}
+
+	// HandleMessage implements the Handler interface.
+	func (h *myMessageHandler) HandleMessage(m *nsq.Message) error {
+		if len(m.Body) == 0 {
+			// Returning nil will automatically send a FIN command to NSQ to mark the message as processed.
+			// In this case, a message with an empty body is simply ignored/discarded.
+			return nil
+		}
+
+		// do whatever actual message processing is desired
+		err := processMessage(m.Body)
+
+		// Returning a non-nil error will automatically send a REQ command to NSQ to re-queue the message.
+		return err
+	}
+
+	func main() {
+		// Instantiate a consumer that will subscribe to the provided channel.
+		config := nsq.NewConfig()
+		consumer, err := nsq.NewConsumer("topic", "channel", config)
+		if err != nil {
+			log.Fatal(err)
+		}
+
+		// Set the Handler for messages received by this Consumer. Can be called multiple times.
+		// See also AddConcurrentHandlers.
+		consumer.AddHandler(&myMessageHandler{})
+
+		// Use nsqlookupd to discover nsqd instances.
+		// See also ConnectToNSQD, ConnectToNSQDs, ConnectToNSQLookupds.
+		err = consumer.ConnectToNSQLookupd("localhost:4161")
+		if err != nil {
+			log.Fatal(err)
+		}
+
+		// wait for signal to exit
+		sigChan := make(chan os.Signal, 1)
+		signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM)
+		<-sigChan
+
+		// Gracefully stop the consumer.
+		consumer.Stop()
+	}
+
+Producer
+
+Producing messages can be done by creating an instance of a Producer.
+
+	// Instantiate a producer.
+	config := nsq.NewConfig()
+	producer, err := nsq.NewProducer("127.0.0.1:4150", config)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	messageBody := []byte("hello")
+	topicName := "topic"
+
+	// Synchronously publish a single message to the specified topic.
+	// Messages can also be sent asynchronously and/or in batches.
+	err = producer.Publish(topicName, messageBody)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// Gracefully stop the producer when appropriate (e.g.
before shutting down the service) + producer.Stop() + +*/ +package nsq diff --git a/vendor/github.com/nsqio/go-nsq/errors.go b/vendor/github.com/nsqio/go-nsq/errors.go new file mode 100644 index 00000000..2f228d10 --- /dev/null +++ b/vendor/github.com/nsqio/go-nsq/errors.go @@ -0,0 +1,44 @@ +package nsq + +import ( + "errors" + "fmt" +) + +// ErrNotConnected is returned when a publish command is made +// against a Producer that is not connected +var ErrNotConnected = errors.New("not connected") + +// ErrStopped is returned when a publish command is +// made against a Producer that has been stopped +var ErrStopped = errors.New("stopped") + +// ErrClosing is returned when a connection is closing +var ErrClosing = errors.New("closing") + +// ErrAlreadyConnected is returned from ConnectToNSQD when already connected +var ErrAlreadyConnected = errors.New("already connected") + +// ErrOverMaxInFlight is returned from Consumer if over max-in-flight +var ErrOverMaxInFlight = errors.New("over configure max-inflight") + +// ErrIdentify is returned from Conn as part of the IDENTIFY handshake +type ErrIdentify struct { + Reason string +} + +// Error returns a stringified error +func (e ErrIdentify) Error() string { + return fmt.Sprintf("failed to IDENTIFY - %s", e.Reason) +} + +// ErrProtocol is returned from Producer when encountering +// an NSQ protocol level error +type ErrProtocol struct { + Reason string +} + +// Error returns a stringified error +func (e ErrProtocol) Error() string { + return e.Reason +} diff --git a/vendor/github.com/nsqio/go-nsq/message.go b/vendor/github.com/nsqio/go-nsq/message.go new file mode 100644 index 00000000..b496451d --- /dev/null +++ b/vendor/github.com/nsqio/go-nsq/message.go @@ -0,0 +1,164 @@ +package nsq + +import ( + "encoding/binary" + "errors" + "io" + "sync/atomic" + "time" +) + +// The number of bytes for a Message.ID +const MsgIDLength = 16 + +// MessageID is the ASCII encoded hexadecimal message ID +type MessageID [MsgIDLength]byte + +// Message is the fundamental data type containing +// the id, body, and metadata +type Message struct { + ID MessageID + Body []byte + Timestamp int64 + Attempts uint16 + + NSQDAddress string + + Delegate MessageDelegate + + autoResponseDisabled int32 + responded int32 +} + +// NewMessage creates a Message, initializes some metadata, +// and returns a pointer +func NewMessage(id MessageID, body []byte) *Message { + return &Message{ + ID: id, + Body: body, + Timestamp: time.Now().UnixNano(), + } +} + +// DisableAutoResponse disables the automatic response that +// would normally be sent when a handler.HandleMessage +// returns (FIN/REQ based on the error value returned). +// +// This is useful if you want to batch, buffer, or asynchronously +// respond to messages. 
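To illustrate the batching/asynchronous pattern the comment above describes, here is a minimal editorial sketch (not part of the vendored file): a handler disables auto-response, hands the message to a worker goroutine, and the worker later calls Finish or Requeue explicitly. The `process` helper is a hypothetical placeholder; everything else is the go-nsq API shown in this file.

```go
package main

import "github.com/nsqio/go-nsq"

// process is a hypothetical stand-in for real message processing.
func process(body []byte) error { return nil }

type asyncHandler struct {
	work chan *nsq.Message
}

// HandleMessage takes ownership of responding: once auto-response is
// disabled, returning nil no longer sends an automatic FIN.
func (h *asyncHandler) HandleMessage(m *nsq.Message) error {
	m.DisableAutoResponse()
	h.work <- m
	return nil
}

// worker responds explicitly when processing actually completes.
func worker(work <-chan *nsq.Message) {
	for m := range work {
		if err := process(m.Body); err != nil {
			m.Requeue(-1) // -1 lets nsqd calculate the requeue delay
			continue
		}
		m.Finish()
	}
}

func main() {
	h := &asyncHandler{work: make(chan *nsq.Message, 64)}
	go worker(h.work)
	_ = h // in real use, register h with a Consumer via AddHandler
}
```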
+func (m *Message) DisableAutoResponse() { + atomic.StoreInt32(&m.autoResponseDisabled, 1) +} + +// IsAutoResponseDisabled indicates whether or not this message +// will be responded to automatically +func (m *Message) IsAutoResponseDisabled() bool { + return atomic.LoadInt32(&m.autoResponseDisabled) == 1 +} + +// HasResponded indicates whether or not this message has been responded to +func (m *Message) HasResponded() bool { + return atomic.LoadInt32(&m.responded) == 1 +} + +// Finish sends a FIN command to the nsqd which +// sent this message +func (m *Message) Finish() { + if !atomic.CompareAndSwapInt32(&m.responded, 0, 1) { + return + } + m.Delegate.OnFinish(m) +} + +// Touch sends a TOUCH command to the nsqd which +// sent this message +func (m *Message) Touch() { + if m.HasResponded() { + return + } + m.Delegate.OnTouch(m) +} + +// Requeue sends a REQ command to the nsqd which +// sent this message, using the supplied delay. +// +// A delay of -1 will automatically calculate +// based on the number of attempts and the +// configured default_requeue_delay +func (m *Message) Requeue(delay time.Duration) { + m.doRequeue(delay, true) +} + +// RequeueWithoutBackoff sends a REQ command to the nsqd which +// sent this message, using the supplied delay. +// +// Notably, using this method to respond does not trigger a backoff +// event on the configured Delegate. +func (m *Message) RequeueWithoutBackoff(delay time.Duration) { + m.doRequeue(delay, false) +} + +func (m *Message) doRequeue(delay time.Duration, backoff bool) { + if !atomic.CompareAndSwapInt32(&m.responded, 0, 1) { + return + } + m.Delegate.OnRequeue(m, delay, backoff) +} + +// WriteTo implements the WriterTo interface and serializes +// the message into the supplied producer. +// +// It is suggested that the target Writer is buffered to +// avoid performing many system calls. +func (m *Message) WriteTo(w io.Writer) (int64, error) { + var buf [10]byte + var total int64 + + binary.BigEndian.PutUint64(buf[:8], uint64(m.Timestamp)) + binary.BigEndian.PutUint16(buf[8:10], uint16(m.Attempts)) + + n, err := w.Write(buf[:]) + total += int64(n) + if err != nil { + return total, err + } + + n, err = w.Write(m.ID[:]) + total += int64(n) + if err != nil { + return total, err + } + + n, err = w.Write(m.Body) + total += int64(n) + if err != nil { + return total, err + } + + return total, nil +} + +// DecodeMessage deserializes data (as []byte) and creates a new Message +// message format: +// [x][x][x][x][x][x][x][x][x][x][x][x][x][x][x][x][x][x][x][x][x][x][x][x][x][x][x][x][x][x]... +// | (int64) || || (hex string encoded in ASCII) || (binary) +// | 8-byte || || 16-byte || N-byte +// ------------------------------------------------------------------------------------------... 
+//    nanosecond timestamp    ^^                   message ID                       message body
+//                         (uint16)
+//                          2-byte
+//                         attempts
+func DecodeMessage(b []byte) (*Message, error) {
+	var msg Message
+
+	if len(b) < 10+MsgIDLength {
+		return nil, errors.New("not enough data to decode valid message")
+	}
+
+	msg.Timestamp = int64(binary.BigEndian.Uint64(b[:8]))
+	msg.Attempts = binary.BigEndian.Uint16(b[8:10])
+	copy(msg.ID[:], b[10:10+MsgIDLength])
+	msg.Body = b[10+MsgIDLength:]
+
+	return &msg, nil
+}
diff --git a/vendor/github.com/nsqio/go-nsq/producer.go b/vendor/github.com/nsqio/go-nsq/producer.go
new file mode 100644
index 00000000..20fd0c87
--- /dev/null
+++ b/vendor/github.com/nsqio/go-nsq/producer.go
@@ -0,0 +1,427 @@
+package nsq
+
+import (
+	"fmt"
+	"log"
+	"os"
+	"sync"
+	"sync/atomic"
+	"time"
+)
+
+type producerConn interface {
+	String() string
+	SetLogger(logger, LogLevel, string)
+	SetLoggerLevel(LogLevel)
+	SetLoggerForLevel(logger, LogLevel, string)
+	Connect() (*IdentifyResponse, error)
+	Close() error
+	WriteCommand(*Command) error
+}
+
+// Producer is a high-level type to publish to NSQ.
+//
+// A Producer instance is 1:1 with a destination `nsqd`
+// and will lazily connect to that instance (and re-connect)
+// when Publish commands are executed.
+type Producer struct {
+	id     int64
+	addr   string
+	conn   producerConn
+	config Config
+
+	logger   []logger
+	logLvl   LogLevel
+	logGuard sync.RWMutex
+
+	responseChan chan []byte
+	errorChan    chan []byte
+	closeChan    chan int
+
+	transactionChan chan *ProducerTransaction
+	transactions    []*ProducerTransaction
+	state           int32
+
+	concurrentProducers int32
+	stopFlag            int32
+	exitChan            chan int
+	wg                  sync.WaitGroup
+	guard               sync.Mutex
+}
+
+// ProducerTransaction is returned by the async publish methods
+// to retrieve metadata about the command after the
+// response is received.
+type ProducerTransaction struct {
+	cmd      *Command
+	doneChan chan *ProducerTransaction
+	Error    error         // the error (or nil) of the publish command
+	Args     []interface{} // the slice of variadic arguments passed to PublishAsync or MultiPublishAsync
+}
+
+func (t *ProducerTransaction) finish() {
+	if t.doneChan != nil {
+		t.doneChan <- t
+	}
+}
+
+// NewProducer returns an instance of Producer for the specified address
+//
+// The only valid way to create a Config is via NewConfig, using a struct literal will panic.
+// After Config is passed into NewProducer the values are no longer mutable (they are copied).
+func NewProducer(addr string, config *Config) (*Producer, error) {
+	err := config.Validate()
+	if err != nil {
+		return nil, err
+	}
+
+	p := &Producer{
+		id: atomic.AddInt64(&instCount, 1),
+
+		addr:   addr,
+		config: *config,
+
+		logger: make([]logger, int(LogLevelMax+1)),
+		logLvl: LogLevelInfo,
+
+		transactionChan: make(chan *ProducerTransaction),
+		exitChan:        make(chan int),
+		responseChan:    make(chan []byte),
+		errorChan:       make(chan []byte),
+	}
+
+	// Set default logger for all log levels
+	l := log.New(os.Stderr, "", log.Flags())
+	for index := range p.logger {
+		p.logger[index] = l
+	}
+	return p, nil
+}
+
+// Ping causes the Producer to connect to its configured nsqd (if not already
+// connected) and send a `Nop` command, returning any error that might occur.
+//
+// This method can be used to verify that a newly-created Producer instance is
+// configured correctly, rather than relying on the lazy "connect on Publish"
+// behavior of a Producer.
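As a usage note (editor's sketch, not part of the vendored file): Ping lets a service fail fast at startup instead of discovering a bad address on the first Publish. The nsqd TCP address below is an assumption.

```go
package main

import (
	"log"

	"github.com/nsqio/go-nsq"
)

func main() {
	cfg := nsq.NewConfig()
	// "127.0.0.1:4150" is an assumed local nsqd TCP address.
	p, err := nsq.NewProducer("127.0.0.1:4150", cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer p.Stop()

	// Verify connectivity now rather than on the first Publish.
	if err := p.Ping(); err != nil {
		log.Fatalf("nsqd unreachable: %s", err)
	}
}
```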
+func (w *Producer) Ping() error {
+	if atomic.LoadInt32(&w.state) != StateConnected {
+		err := w.connect()
+		if err != nil {
+			return err
+		}
+	}
+
+	return w.conn.WriteCommand(Nop())
+}
+
+// SetLogger assigns the logger to use as well as a level
+//
+// The logger parameter is an interface that requires the following
+// method to be implemented (such as the stdlib log.Logger):
+//
+//    Output(calldepth int, s string)
+//
+func (w *Producer) SetLogger(l logger, lvl LogLevel) {
+	w.logGuard.Lock()
+	defer w.logGuard.Unlock()
+
+	for level := range w.logger {
+		w.logger[level] = l
+	}
+	w.logLvl = lvl
+}
+
+// SetLoggerForLevel assigns the logger to use for the specified `level`.
+func (w *Producer) SetLoggerForLevel(l logger, lvl LogLevel) {
+	w.logGuard.Lock()
+	defer w.logGuard.Unlock()
+
+	w.logger[lvl] = l
+}
+
+// SetLoggerLevel sets the package logging level.
+func (w *Producer) SetLoggerLevel(lvl LogLevel) {
+	w.logGuard.Lock()
+	defer w.logGuard.Unlock()
+
+	w.logLvl = lvl
+}
+
+func (w *Producer) getLogger(lvl LogLevel) (logger, LogLevel) {
+	w.logGuard.RLock()
+	defer w.logGuard.RUnlock()
+
+	return w.logger[lvl], w.logLvl
+}
+
+func (w *Producer) getLogLevel() LogLevel {
+	w.logGuard.RLock()
+	defer w.logGuard.RUnlock()
+
+	return w.logLvl
+}
+
+// String returns the address of the Producer
+func (w *Producer) String() string {
+	return w.addr
+}
+
+// Stop initiates a graceful stop of the Producer (permanent)
+//
+// NOTE: this blocks until completion
+func (w *Producer) Stop() {
+	w.guard.Lock()
+	if !atomic.CompareAndSwapInt32(&w.stopFlag, 0, 1) {
+		w.guard.Unlock()
+		return
+	}
+	w.log(LogLevelInfo, "(%s) stopping", w.addr)
+	close(w.exitChan)
+	w.close()
+	w.guard.Unlock()
+	w.wg.Wait()
+}
+
+// PublishAsync publishes a message body to the specified topic
+// but does not wait for the response from `nsqd`.
+//
+// When the Producer eventually receives the response from `nsqd`,
+// the supplied `doneChan` (if specified)
+// will receive a `ProducerTransaction` instance with the supplied variadic arguments
+// and the response error if present
+func (w *Producer) PublishAsync(topic string, body []byte, doneChan chan *ProducerTransaction,
+	args ...interface{}) error {
+	return w.sendCommandAsync(Publish(topic, body), doneChan, args)
+}
+
+// MultiPublishAsync publishes a slice of message bodies to the specified topic
+// but does not wait for the response from `nsqd`.
+// +// When the Producer eventually receives the response from `nsqd`, +// the supplied `doneChan` (if specified) +// will receive a `ProducerTransaction` instance with the supplied variadic arguments +// and the response error if present +func (w *Producer) MultiPublishAsync(topic string, body [][]byte, doneChan chan *ProducerTransaction, + args ...interface{}) error { + cmd, err := MultiPublish(topic, body) + if err != nil { + return err + } + return w.sendCommandAsync(cmd, doneChan, args) +} + +// Publish synchronously publishes a message body to the specified topic, returning +// an error if publish failed +func (w *Producer) Publish(topic string, body []byte) error { + return w.sendCommand(Publish(topic, body)) +} + +// MultiPublish synchronously publishes a slice of message bodies to the specified topic, returning +// an error if publish failed +func (w *Producer) MultiPublish(topic string, body [][]byte) error { + cmd, err := MultiPublish(topic, body) + if err != nil { + return err + } + return w.sendCommand(cmd) +} + +// DeferredPublish synchronously publishes a message body to the specified topic +// where the message will queue at the channel level until the timeout expires, returning +// an error if publish failed +func (w *Producer) DeferredPublish(topic string, delay time.Duration, body []byte) error { + return w.sendCommand(DeferredPublish(topic, delay, body)) +} + +// DeferredPublishAsync publishes a message body to the specified topic +// where the message will queue at the channel level until the timeout expires +// but does not wait for the response from `nsqd`. +// +// When the Producer eventually receives the response from `nsqd`, +// the supplied `doneChan` (if specified) +// will receive a `ProducerTransaction` instance with the supplied variadic arguments +// and the response error if present +func (w *Producer) DeferredPublishAsync(topic string, delay time.Duration, body []byte, + doneChan chan *ProducerTransaction, args ...interface{}) error { + return w.sendCommandAsync(DeferredPublish(topic, delay, body), doneChan, args) +} + +func (w *Producer) sendCommand(cmd *Command) error { + doneChan := make(chan *ProducerTransaction) + err := w.sendCommandAsync(cmd, doneChan, nil) + if err != nil { + close(doneChan) + return err + } + t := <-doneChan + return t.Error +} + +func (w *Producer) sendCommandAsync(cmd *Command, doneChan chan *ProducerTransaction, + args []interface{}) error { + // keep track of how many outstanding producers we're dealing with + // in order to later ensure that we clean them all up... 
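+	// (transactionCleanup spins until this counter drains back to zero,
+	// pairing each increment here with the deferred decrement below)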
+ atomic.AddInt32(&w.concurrentProducers, 1) + defer atomic.AddInt32(&w.concurrentProducers, -1) + + if atomic.LoadInt32(&w.state) != StateConnected { + err := w.connect() + if err != nil { + return err + } + } + + t := &ProducerTransaction{ + cmd: cmd, + doneChan: doneChan, + Args: args, + } + + select { + case w.transactionChan <- t: + case <-w.exitChan: + return ErrStopped + } + + return nil +} + +func (w *Producer) connect() error { + w.guard.Lock() + defer w.guard.Unlock() + + if atomic.LoadInt32(&w.stopFlag) == 1 { + return ErrStopped + } + + state := atomic.LoadInt32(&w.state) + switch { + case state == StateConnected: + return nil + case state != StateInit: + return ErrNotConnected + } + + w.log(LogLevelInfo, "(%s) connecting to nsqd", w.addr) + + w.conn = NewConn(w.addr, &w.config, &producerConnDelegate{w}) + w.conn.SetLoggerLevel(w.getLogLevel()) + format := fmt.Sprintf("%3d (%%s)", w.id) + for index := range w.logger { + w.conn.SetLoggerForLevel(w.logger[index], LogLevel(index), format) + } + + _, err := w.conn.Connect() + if err != nil { + w.conn.Close() + w.log(LogLevelError, "(%s) error connecting to nsqd - %s", w.addr, err) + return err + } + atomic.StoreInt32(&w.state, StateConnected) + w.closeChan = make(chan int) + w.wg.Add(1) + go w.router() + + return nil +} + +func (w *Producer) close() { + if !atomic.CompareAndSwapInt32(&w.state, StateConnected, StateDisconnected) { + return + } + w.conn.Close() + go func() { + // we need to handle this in a goroutine so we don't + // block the caller from making progress + w.wg.Wait() + atomic.StoreInt32(&w.state, StateInit) + }() +} + +func (w *Producer) router() { + for { + select { + case t := <-w.transactionChan: + w.transactions = append(w.transactions, t) + err := w.conn.WriteCommand(t.cmd) + if err != nil { + w.log(LogLevelError, "(%s) sending command - %s", w.conn.String(), err) + w.close() + } + case data := <-w.responseChan: + w.popTransaction(FrameTypeResponse, data) + case data := <-w.errorChan: + w.popTransaction(FrameTypeError, data) + case <-w.closeChan: + goto exit + case <-w.exitChan: + goto exit + } + } + +exit: + w.transactionCleanup() + w.wg.Done() + w.log(LogLevelInfo, "(%s) exiting router", w.conn.String()) +} + +func (w *Producer) popTransaction(frameType int32, data []byte) { + t := w.transactions[0] + w.transactions = w.transactions[1:] + if frameType == FrameTypeError { + t.Error = ErrProtocol{string(data)} + } + t.finish() +} + +func (w *Producer) transactionCleanup() { + // clean up transactions we can easily account for + for _, t := range w.transactions { + t.Error = ErrNotConnected + t.finish() + } + w.transactions = w.transactions[:0] + + // spin and free up any writes that might have raced + // with the cleanup process (blocked on writing + // to transactionChan) + for { + select { + case t := <-w.transactionChan: + t.Error = ErrNotConnected + t.finish() + default: + // keep spinning until there are 0 concurrent producers + if atomic.LoadInt32(&w.concurrentProducers) == 0 { + return + } + // give the runtime a chance to schedule other racing goroutines + time.Sleep(5 * time.Millisecond) + } + } +} + +func (w *Producer) log(lvl LogLevel, line string, args ...interface{}) { + logger, logLvl := w.getLogger(lvl) + + if logger == nil { + return + } + + if logLvl > lvl { + return + } + + logger.Output(2, fmt.Sprintf("%-4s %3d %s", lvl, w.id, fmt.Sprintf(line, args...))) +} + +func (w *Producer) onConnResponse(c *Conn, data []byte) { w.responseChan <- data } +func (w *Producer) onConnError(c *Conn, data 
[]byte) { w.errorChan <- data } +func (w *Producer) onConnHeartbeat(c *Conn) {} +func (w *Producer) onConnIOError(c *Conn, err error) { w.close() } +func (w *Producer) onConnClose(c *Conn) { + w.guard.Lock() + defer w.guard.Unlock() + close(w.closeChan) +} diff --git a/vendor/github.com/nsqio/go-nsq/protocol.go b/vendor/github.com/nsqio/go-nsq/protocol.go new file mode 100644 index 00000000..1d20851b --- /dev/null +++ b/vendor/github.com/nsqio/go-nsq/protocol.go @@ -0,0 +1,100 @@ +package nsq + +import ( + "encoding/binary" + "errors" + "fmt" + "io" + "regexp" +) + +// MagicV1 is the initial identifier sent when connecting for V1 clients +var MagicV1 = []byte(" V1") + +// MagicV2 is the initial identifier sent when connecting for V2 clients +var MagicV2 = []byte(" V2") + +// frame types +const ( + FrameTypeResponse int32 = 0 + FrameTypeError int32 = 1 + FrameTypeMessage int32 = 2 +) + +var validTopicChannelNameRegex = regexp.MustCompile(`^[\.a-zA-Z0-9_-]+(#ephemeral)?$`) + +// IsValidTopicName checks a topic name for correctness +func IsValidTopicName(name string) bool { + return isValidName(name) +} + +// IsValidChannelName checks a channel name for correctness +func IsValidChannelName(name string) bool { + return isValidName(name) +} + +func isValidName(name string) bool { + if len(name) > 64 || len(name) < 1 { + return false + } + return validTopicChannelNameRegex.MatchString(name) +} + +// ReadResponse is a client-side utility function to read from the supplied Reader +// according to the NSQ protocol spec: +// +// [x][x][x][x][x][x][x][x]... +// | (int32) || (binary) +// | 4-byte || N-byte +// ------------------------... +// size data +func ReadResponse(r io.Reader) ([]byte, error) { + var msgSize int32 + + // message size + err := binary.Read(r, binary.BigEndian, &msgSize) + if err != nil { + return nil, err + } + + if msgSize < 0 { + return nil, fmt.Errorf("response msg size is negative: %v", msgSize) + } + // message binary data + buf := make([]byte, msgSize) + _, err = io.ReadFull(r, buf) + if err != nil { + return nil, err + } + + return buf, nil +} + +// UnpackResponse is a client-side utility function that unpacks serialized data +// according to NSQ protocol spec: +// +// [x][x][x][x][x][x][x][x]... +// | (int32) || (binary) +// | 4-byte || N-byte +// ------------------------... 
+// frame ID data +// +// Returns a triplicate of: frame type, data ([]byte), error +func UnpackResponse(response []byte) (int32, []byte, error) { + if len(response) < 4 { + return -1, nil, errors.New("length of response is too small") + } + + return int32(binary.BigEndian.Uint32(response)), response[4:], nil +} + +// ReadUnpackedResponse reads and parses data from the underlying +// TCP connection according to the NSQ TCP protocol spec and +// returns the frameType, data or error +func ReadUnpackedResponse(r io.Reader) (int32, []byte, error) { + resp, err := ReadResponse(r) + if err != nil { + return -1, nil, err + } + return UnpackResponse(resp) +} diff --git a/vendor/github.com/nsqio/go-nsq/states.go b/vendor/github.com/nsqio/go-nsq/states.go new file mode 100644 index 00000000..0db0291b --- /dev/null +++ b/vendor/github.com/nsqio/go-nsq/states.go @@ -0,0 +1,8 @@ +package nsq + +// states +const ( + StateInit = iota + StateDisconnected + StateConnected +) diff --git a/vendor/github.com/nsqio/go-nsq/version.go b/vendor/github.com/nsqio/go-nsq/version.go new file mode 100644 index 00000000..0cb342fa --- /dev/null +++ b/vendor/github.com/nsqio/go-nsq/version.go @@ -0,0 +1,4 @@ +package nsq + +// VERSION +const VERSION = "1.1.0" diff --git a/vendor/github.com/shenghui0779/vitess_pool/.gitignore b/vendor/github.com/shenghui0779/vitess_pool/.gitignore new file mode 100644 index 00000000..66fd13c9 --- /dev/null +++ b/vendor/github.com/shenghui0779/vitess_pool/.gitignore @@ -0,0 +1,15 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +# vendor/ diff --git a/vendor/github.com/shenghui0779/vitess_pool/LICENSE b/vendor/github.com/shenghui0779/vitess_pool/LICENSE new file mode 100644 index 00000000..2e773fac --- /dev/null +++ b/vendor/github.com/shenghui0779/vitess_pool/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2020 shenghui + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/shenghui0779/vitess_pool/README.md b/vendor/github.com/shenghui0779/vitess_pool/README.md new file mode 100644 index 00000000..ae1d5bda --- /dev/null +++ b/vendor/github.com/shenghui0779/vitess_pool/README.md @@ -0,0 +1,4 @@ +# vitess_pool + +Connection pool for Go. +It's based on [vitess resource pool](https://github.com/vitessio/vitess/tree/master/go/pools). 
\ No newline at end of file
diff --git a/vendor/github.com/shenghui0779/vitess_pool/atomic.go b/vendor/github.com/shenghui0779/vitess_pool/atomic.go
new file mode 100644
index 00000000..2f806649
--- /dev/null
+++ b/vendor/github.com/shenghui0779/vitess_pool/atomic.go
@@ -0,0 +1,66 @@
+package vitess_pool
+
+import (
+	"sync/atomic"
+	"time"
+)
+
+// AtomicInt64 is a wrapper with a simpler interface around atomic.(Add|Store|Load|CompareAndSwap)Int64 functions.
+type AtomicInt64 struct {
+	int64
+}
+
+// NewAtomicInt64 initializes a new AtomicInt64 with a given value.
+func NewAtomicInt64(n int64) AtomicInt64 {
+	return AtomicInt64{n}
+}
+
+// Add atomically adds n to the value.
+func (i *AtomicInt64) Add(n int64) int64 {
+	return atomic.AddInt64(&i.int64, n)
+}
+
+// Set atomically sets n as new value.
+func (i *AtomicInt64) Set(n int64) {
+	atomic.StoreInt64(&i.int64, n)
+}
+
+// Get atomically returns the current value.
+func (i *AtomicInt64) Get() int64 {
+	return atomic.LoadInt64(&i.int64)
+}
+
+// CompareAndSwap atomically swaps the old with the new value.
+func (i *AtomicInt64) CompareAndSwap(oldval, newval int64) bool {
+	return atomic.CompareAndSwapInt64(&i.int64, oldval, newval)
+}
+
+// AtomicDuration is a wrapper with a simpler interface around atomic.(Add|Store|Load|CompareAndSwap)Int64 functions.
+type AtomicDuration struct {
+	int64
+}
+
+// NewAtomicDuration initializes a new AtomicDuration with a given value.
+func NewAtomicDuration(duration time.Duration) AtomicDuration {
+	return AtomicDuration{int64(duration)}
+}
+
+// Add atomically adds duration to the value.
+func (d *AtomicDuration) Add(duration time.Duration) time.Duration {
+	return time.Duration(atomic.AddInt64(&d.int64, int64(duration)))
+}
+
+// Set atomically sets duration as new value.
+func (d *AtomicDuration) Set(duration time.Duration) {
+	atomic.StoreInt64(&d.int64, int64(duration))
+}
+
+// Get atomically returns the current value.
+func (d *AtomicDuration) Get() time.Duration {
+	return time.Duration(atomic.LoadInt64(&d.int64))
+}
+
+// CompareAndSwap atomically swaps the old with the new value.
+func (d *AtomicDuration) CompareAndSwap(oldval, newval time.Duration) bool {
+	return atomic.CompareAndSwapInt64(&d.int64, int64(oldval), int64(newval))
+}
diff --git a/vendor/github.com/shenghui0779/vitess_pool/doc.go b/vendor/github.com/shenghui0779/vitess_pool/doc.go
new file mode 100644
index 00000000..2f2c271d
--- /dev/null
+++ b/vendor/github.com/shenghui0779/vitess_pool/doc.go
@@ -0,0 +1,3 @@
+// Package vitess_pool provides functionality to manage and reuse resources like connections.
+// It's based on vitess resource pool (https://github.com/vitessio/vitess/tree/master/go/pools).
+package vitess_pool
diff --git a/vendor/github.com/shenghui0779/vitess_pool/pool.go b/vendor/github.com/shenghui0779/vitess_pool/pool.go
new file mode 100644
index 00000000..deadffab
--- /dev/null
+++ b/vendor/github.com/shenghui0779/vitess_pool/pool.go
@@ -0,0 +1,400 @@
+package vitess_pool
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"sync"
+	"time"
+)
+
+var (
+	// ErrClosed is returned if ResourcePool is used when it's closed.
+	ErrClosed = errors.New("resource pool is closed")
+
+	// ErrTimeout is returned if a resource get times out.
+	ErrTimeout = errors.New("resource pool timed out")
+
+	// ErrCtxTimeout is returned if a ctx is already expired by the time the resource pool is used.
+ ErrCtxTimeout = errors.New("resource pool context already expired") + + prefillTimeout = 30 * time.Second +) + +// Factory is a function that can be used to create a resource. +type Factory func() (Resource, error) + +// Resource defines the interface that every resource must provide. +// Thread synchronization between Close() and IsClosed() is the responsibility of the caller. +type Resource interface { + Close() +} + +// ResourcePool allows you to use a pool of resources. +type ResourcePool struct { + // stats. Atomic fields must remain at the top in order to prevent panics on certain architectures. + available AtomicInt64 + active AtomicInt64 + inUse AtomicInt64 + waitCount AtomicInt64 + waitTime AtomicDuration + idleClosed AtomicInt64 + exhausted AtomicInt64 + + capacity AtomicInt64 + idleTimeout AtomicDuration + + resources chan resourceWrapper + factory Factory + idleTimer *Timer +} + +type resourceWrapper struct { + resource Resource + timeUsed time.Time +} + +// NewResourcePool creates a new ResourcePool pool. +// capacity is the number of possible resources in the pool: +// there can be up to 'capacity' of these at a given time. +// maxCap specifies the extent to which the pool can be resized in the future through the SetCapacity function. +// You cannot resize the pool beyond maxCap. +// If a resource is unused beyond idleTimeout, it's replaced with a new one. +// An idleTimeout of 0 means that there is no timeout. +// A non-zero value of prefillParallelism causes the pool to be pre-filled. +// The value specifies how many resources can be opened in parallel. +func NewResourcePool(factory Factory, capacity, maxCap int, idleTimeout time.Duration, prefillParallelism int) *ResourcePool { + if capacity <= 0 || maxCap <= 0 || capacity > maxCap { + panic(errors.New("invalid/out of range capacity")) + } + + rp := &ResourcePool{ + resources: make(chan resourceWrapper, maxCap), + factory: factory, + available: NewAtomicInt64(int64(capacity)), + capacity: NewAtomicInt64(int64(capacity)), + idleTimeout: NewAtomicDuration(idleTimeout), + } + + for i := 0; i < capacity; i++ { + rp.resources <- resourceWrapper{} + } + + ctx, cancel := context.WithTimeout(context.TODO(), prefillTimeout) + defer cancel() + + if prefillParallelism != 0 { + sem := NewSemaphore(prefillParallelism, 0 /* timeout */) + + var wg sync.WaitGroup + + for i := 0; i < capacity; i++ { + wg.Add(1) + + go func() { + defer wg.Done() + + _ = sem.Acquire() + defer sem.Release() + + // If context has expired, give up. + select { + case <-ctx.Done(): + return + default: + } + + r, err := rp.Get(ctx) + + if err != nil { + return + } + + rp.Put(r) + }() + } + + wg.Wait() + } + + if idleTimeout != 0 { + rp.idleTimer = NewTimer(idleTimeout / 10) + rp.idleTimer.Start(rp.closeIdleResources) + } + + return rp +} + +// Close empties the pool calling Close on all its resources. +// You can call Close while there are outstanding resources. +// It waits for all resources to be returned (Put). +// After a Close, Get is not allowed. +func (rp *ResourcePool) Close() { + if rp.idleTimer != nil { + rp.idleTimer.Stop() + } + + _ = rp.SetCapacity(0) +} + +// IsClosed returns true if the resource pool is closed. +func (rp *ResourcePool) IsClosed() (closed bool) { + return rp.capacity.Get() == 0 +} + +// closeIdleResources scans the pool for idle resources. 
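To make the pool API above concrete, here is an editorial usage sketch (not part of the vendored file). The `poolConn` adapter and the dial address are assumptions; the pool calls themselves are the ones defined in this file.

```go
package main

import (
	"context"
	"fmt"
	"net"
	"time"

	"github.com/shenghui0779/vitess_pool"
)

// poolConn adapts net.Conn to the pool's Resource interface (Close with no error).
type poolConn struct {
	net.Conn
}

func (c *poolConn) Close() { c.Conn.Close() }

func main() {
	factory := func() (vitess_pool.Resource, error) {
		conn, err := net.Dial("tcp", "127.0.0.1:6379") // assumed address
		if err != nil {
			return nil, err
		}
		return &poolConn{Conn: conn}, nil
	}

	// capacity 5, resizable up to 10, 1-minute idle timeout, no prefill.
	pool := vitess_pool.NewResourcePool(factory, 5, 10, time.Minute, 0)
	defer pool.Close()

	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	r, err := pool.Get(ctx)
	if err != nil {
		fmt.Println("get:", err)
		return
	}
	// ... use r.(*poolConn) ...
	pool.Put(r) // every successful Get needs a matching Put
}
```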
+func (rp *ResourcePool) closeIdleResources() { + available := int(rp.Available()) + idleTimeout := rp.IdleTimeout() + + for i := 0; i < available; i++ { + var wrapper resourceWrapper + + select { + case wrapper = <-rp.resources: + default: + // stop early if we don't get anything new from the pool. + return + } + + func() { + defer func() { rp.resources <- wrapper }() + + if wrapper.resource != nil && idleTimeout > 0 && time.Until(wrapper.timeUsed.Add(idleTimeout)) < 0 { + wrapper.resource.Close() + + rp.idleClosed.Add(1) + rp.reopenResource(&wrapper) + } + }() + + } +} + +// Get will return the next available resource. +// If capacity has not been reached, it will create a new one using the factory. Otherwise, +// it will wait till the next resource becomes available or a timeout. +// A timeout of 0 is an indefinite wait. +func (rp *ResourcePool) Get(ctx context.Context) (resource Resource, err error) { + // If ctx has already expired, avoid racing with rp's resource channel. + select { + case <-ctx.Done(): + return nil, ErrCtxTimeout + default: + } + + // Fetch + var wrapper resourceWrapper + var ok bool + + select { + case wrapper, ok = <-rp.resources: + default: + startTime := time.Now() + + select { + case wrapper, ok = <-rp.resources: + case <-ctx.Done(): + return nil, ErrTimeout + } + + rp.recordWait(startTime) + } + + if !ok { + return nil, ErrClosed + } + + // Unwrap + if wrapper.resource == nil { + wrapper.resource, err = rp.factory() + + if err != nil { + rp.resources <- resourceWrapper{} + + return nil, err + } + + rp.active.Add(1) + } + + if rp.available.Add(-1) <= 0 { + rp.exhausted.Add(1) + } + + rp.inUse.Add(1) + + return wrapper.resource, err +} + +// Put will return a resource to the pool. +// For every successful Get, a corresponding Put is required. +// If you no longer need a resource, you will need to call Put(nil) instead of returning the closed resource. +// This will cause a new resource to be created in its place. +func (rp *ResourcePool) Put(resource Resource) { + var wrapper resourceWrapper + + if resource != nil { + wrapper = resourceWrapper{ + resource: resource, + timeUsed: time.Now(), + } + } else { + rp.reopenResource(&wrapper) + } + + select { + case rp.resources <- wrapper: + default: + panic(errors.New("attempt to Put into a full ResourcePool")) + } + + rp.inUse.Add(-1) + rp.available.Add(1) +} + +func (rp *ResourcePool) reopenResource(wrapper *resourceWrapper) { + if r, err := rp.factory(); err == nil { + wrapper.resource = r + wrapper.timeUsed = time.Now() + } else { + wrapper.resource = nil + + rp.active.Add(-1) + } +} + +// SetCapacity changes the capacity of the pool. +// You can use it to shrink or expand, but not beyond the max capacity. +// If the change requires the pool to be shrunk, SetCapacity waits till the necessary number of resources are returned to the pool. +// A SetCapacity of 0 is equivalent to closing the ResourcePool. +func (rp *ResourcePool) SetCapacity(capacity int) error { + if capacity < 0 || capacity > cap(rp.resources) { + return fmt.Errorf("capacity %d is out of range", capacity) + } + + // Atomically swap new capacity with old, but only if old capacity is non-zero. 
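+	// (a capacity of zero means the pool has already been closed; the CAS loop
+	// below therefore returns ErrClosed rather than reviving a closed pool)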
+ var oldcap int + + for { + oldcap = int(rp.capacity.Get()) + + if oldcap == 0 { + return ErrClosed + } + + if oldcap == capacity { + return nil + } + + if rp.capacity.CompareAndSwap(int64(oldcap), int64(capacity)) { + break + } + } + + if capacity < oldcap { + for i := 0; i < oldcap-capacity; i++ { + wrapper := <-rp.resources + + if wrapper.resource != nil { + wrapper.resource.Close() + rp.active.Add(-1) + } + + rp.available.Add(-1) + } + } else { + for i := 0; i < capacity-oldcap; i++ { + rp.resources <- resourceWrapper{} + rp.available.Add(1) + } + } + + if capacity == 0 { + close(rp.resources) + } + + return nil +} + +func (rp *ResourcePool) recordWait(start time.Time) { + rp.waitCount.Add(1) + rp.waitTime.Add(time.Since(start)) +} + +// SetIdleTimeout sets the idle timeout. +// It can only be used if there was an idle timeout set when the pool was created. +func (rp *ResourcePool) SetIdleTimeout(idleTimeout time.Duration) { + if rp.idleTimer == nil { + panic("SetIdleTimeout called when timer not initialized") + } + + rp.idleTimeout.Set(idleTimeout) + rp.idleTimer.SetInterval(idleTimeout / 10) +} + +// StatsJSON returns the stats in JSON format. +func (rp *ResourcePool) StatsJSON() string { + return fmt.Sprintf(`{"Capacity": %v, "Available": %v, "Active": %v, "InUse": %v, "MaxCapacity": %v, "WaitCount": %v, "WaitTime": %v, "IdleTimeout": %v, "IdleClosed": %v, "Exhausted": %v}`, + rp.Capacity(), + rp.Available(), + rp.Active(), + rp.InUse(), + rp.MaxCap(), + rp.WaitCount(), + rp.WaitTime().Nanoseconds(), + rp.IdleTimeout().Nanoseconds(), + rp.IdleClosed(), + rp.Exhausted(), + ) +} + +// Capacity returns the capacity. +func (rp *ResourcePool) Capacity() int64 { + return rp.capacity.Get() +} + +// Available returns the number of currently unused and available resources. +func (rp *ResourcePool) Available() int64 { + return rp.available.Get() +} + +// Active returns the number of active (i.e. non-nil) resources either in the pool or claimed for use +func (rp *ResourcePool) Active() int64 { + return rp.active.Get() +} + +// InUse returns the number of claimed resources from the pool +func (rp *ResourcePool) InUse() int64 { + return rp.inUse.Get() +} + +// MaxCap returns the max capacity. +func (rp *ResourcePool) MaxCap() int64 { + return int64(cap(rp.resources)) +} + +// WaitCount returns the total number of waits. +func (rp *ResourcePool) WaitCount() int64 { + return rp.waitCount.Get() +} + +// WaitTime returns the total wait time. +func (rp *ResourcePool) WaitTime() time.Duration { + return rp.waitTime.Get() +} + +// IdleTimeout returns the idle timeout. +func (rp *ResourcePool) IdleTimeout() time.Duration { + return rp.idleTimeout.Get() +} + +// IdleClosed returns the count of resources closed due to idle timeout. +func (rp *ResourcePool) IdleClosed() int64 { + return rp.idleClosed.Get() +} + +// Exhausted returns the number of times Available dropped below 1 +func (rp *ResourcePool) Exhausted() int64 { + return rp.exhausted.Get() +} diff --git a/vendor/github.com/shenghui0779/vitess_pool/semaphore.go b/vendor/github.com/shenghui0779/vitess_pool/semaphore.go new file mode 100644 index 00000000..34772431 --- /dev/null +++ b/vendor/github.com/shenghui0779/vitess_pool/semaphore.go @@ -0,0 +1,64 @@ +package vitess_pool + +import "time" + +// Semaphore is a counting semaphore with the option to specify a timeout. +type Semaphore struct { + slots chan struct{} + timeout time.Duration +} + +// NewSemaphore creates a Semaphore. The count parameter must be a positive number. 
+// A timeout of zero means that there is no timeout. +func NewSemaphore(count int, timeout time.Duration) *Semaphore { + sem := &Semaphore{ + slots: make(chan struct{}, count), + timeout: timeout, + } + + for i := 0; i < count; i++ { + sem.slots <- struct{}{} + } + + return sem +} + +// Acquire returns true on successful acquisition, and false on a timeout. +func (sem *Semaphore) Acquire() bool { + if sem.timeout == 0 { + <-sem.slots + return true + } + + tm := time.NewTimer(sem.timeout) + defer tm.Stop() + + select { + case <-sem.slots: + return true + case <-tm.C: + return false + } +} + +// TryAcquire acquires a semaphore if it's immediately available. +// It returns false otherwise. +func (sem *Semaphore) TryAcquire() bool { + select { + case <-sem.slots: + return true + default: + return false + } +} + +// Release releases the acquired semaphore. +// You must not release more than the number of semaphores you've acquired. +func (sem *Semaphore) Release() { + sem.slots <- struct{}{} +} + +// Size returns the current number of available slots. +func (sem *Semaphore) Size() int { + return len(sem.slots) +} diff --git a/vendor/github.com/shenghui0779/vitess_pool/timer.go b/vendor/github.com/shenghui0779/vitess_pool/timer.go new file mode 100644 index 00000000..004bf0a1 --- /dev/null +++ b/vendor/github.com/shenghui0779/vitess_pool/timer.go @@ -0,0 +1,160 @@ +package vitess_pool + +import ( + "sync" + "time" +) + +// Out-of-band messages +type typeAction int + +const ( + timerStop typeAction = iota + timerReset + timerTrigger +) + +/* +Timer provides timer functionality that can be controlled +by the user. You start the timer by providing it a callback function, +which it will call at the specified interval. + + var t = NewTimer(1e9) + t.Start(KeepHouse) + + func KeepHouse() { + // do house keeping work + } + +You can stop the timer by calling t.Stop, which is guaranteed to +wait if KeepHouse is being executed. + +You can create an untimely trigger by calling t.Trigger. You can also +schedule an untimely trigger by calling t.TriggerAfter. + +The timer interval can be changed on the fly by calling t.SetInterval. +A zero value interval will cause the timer to wait indefinitely, and it +will react only to an explicit Trigger or Stop. +*/ +type Timer struct { + interval AtomicDuration + + // state management + mu sync.Mutex + running bool + + // msg is used for out-of-band messages + msg chan typeAction +} + +// NewTimer creates a new Timer object +func NewTimer(interval time.Duration) *Timer { + tm := &Timer{ + msg: make(chan typeAction), + } + + tm.interval.Set(interval) + + return tm +} + +// Start starts the timer. +func (tm *Timer) Start(keephouse func()) { + tm.mu.Lock() + defer tm.mu.Unlock() + + if tm.running { + return + } + + tm.running = true + + go tm.run(keephouse) +} + +func (tm *Timer) run(keephouse func()) { + var timer *time.Timer + + for { + var ch <-chan time.Time + interval := tm.interval.Get() + + if interval > 0 { + timer = time.NewTimer(interval) + ch = timer.C + } + + select { + case action := <-tm.msg: + if timer != nil { + timer.Stop() + timer = nil + } + switch action { + case timerStop: + return + case timerReset: + continue + } + case <-ch: + } + + keephouse() + } +} + +// SetInterval changes the wait interval. +// It will cause the timer to restart the wait. 
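A short editorial sketch (not part of the vendored file) exercising the control calls documented in this file — interval change, manual trigger, and stop; no names beyond this file's exported API are assumed.

```go
package main

import (
	"fmt"
	"time"

	"github.com/shenghui0779/vitess_pool"
)

func main() {
	t := vitess_pool.NewTimer(time.Second)
	t.Start(func() { fmt.Println("housekeeping") })

	t.SetInterval(200 * time.Millisecond) // restarts the wait with the new interval
	t.Trigger()                           // runs the callback out of band

	time.Sleep(time.Second)
	t.Stop() // no further callbacks once Stop returns
}
```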
+func (tm *Timer) SetInterval(ns time.Duration) { + tm.interval.Set(ns) + + tm.mu.Lock() + defer tm.mu.Unlock() + + if tm.running { + tm.msg <- timerReset + } +} + +// Trigger will cause the timer to immediately execute the keephouse function. +// It will then cause the timer to restart the wait. +func (tm *Timer) Trigger() { + tm.mu.Lock() + defer tm.mu.Unlock() + + if tm.running { + tm.msg <- timerTrigger + } +} + +// TriggerAfter waits for the specified duration and triggers the next event. +func (tm *Timer) TriggerAfter(duration time.Duration) { + go func() { + time.Sleep(duration) + tm.Trigger() + }() +} + +// Stop will stop the timer. +// It guarantees that the timer will not execute any more calls to keephouse once it has returned. +func (tm *Timer) Stop() { + tm.mu.Lock() + defer tm.mu.Unlock() + + if tm.running { + tm.msg <- timerStop + tm.running = false + } +} + +// Interval returns the current interval. +func (tm *Timer) Interval() time.Duration { + return tm.interval.Get() +} + +func (tm *Timer) Running() bool { + tm.mu.Lock() + defer tm.mu.Unlock() + + return tm.running +} diff --git a/vendor/github.com/shenghui0779/yiigo/.gitignore b/vendor/github.com/shenghui0779/yiigo/.gitignore new file mode 100644 index 00000000..ad02c6cf --- /dev/null +++ b/vendor/github.com/shenghui0779/yiigo/.gitignore @@ -0,0 +1,22 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +# vendor/ + +.vscode/ +.idea/ +vendor/ +logs/ +*.log +.env diff --git a/vendor/github.com/shenghui0779/yiigo/LICENSE b/vendor/github.com/shenghui0779/yiigo/LICENSE new file mode 100644 index 00000000..261eeb9e --- /dev/null +++ b/vendor/github.com/shenghui0779/yiigo/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/shenghui0779/yiigo/README.md b/vendor/github.com/shenghui0779/yiigo/README.md
new file mode 100644
index 00000000..1c6e8c5a
--- /dev/null
+++ b/vendor/github.com/shenghui0779/yiigo/README.md
@@ -0,0 +1,521 @@
+# yiigo
+
+[![golang](https://img.shields.io/badge/Language-Go-green.svg?style=flat)](https://golang.org) [![GitHub release](https://img.shields.io/github/release/shenghui0779/yiigo.svg)](https://github.com/shenghui0779/yiigo/releases/latest) [![pkg.go.dev](https://img.shields.io/badge/dev-reference-007d9c?logo=go&logoColor=white&style=flat)](https://pkg.go.dev/github.com/shenghui0779/yiigo) [![Apache 2.0 license](http://img.shields.io/badge/license-Apache%202.0-brightgreen.svg)](http://opensource.org/licenses/apache2.0)
+
+A handy, lightweight general-purpose library for Go development. If you dislike heavily over-encapsulated frameworks, this library may be a good choice 😊
+
+## Features
+
+- Supports [MySQL](https://github.com/go-sql-driver/mysql)
+- Supports [PostgreSQL](https://github.com/lib/pq)
+- Supports [SQLite3](https://github.com/mattn/go-sqlite3)
+- Supports [MongoDB](https://github.com/mongodb/mongo-go-driver)
+- Supports [Redis](https://github.com/gomodule/redigo)
+- Supports [NSQ](https://github.com/nsqio/go-nsq)
+- SQL via [sqlx](https://github.com/jmoiron/sqlx)
+- Recommended ORM: [ent](https://github.com/ent/ent)
+- Logging via [zap](https://github.com/uber-go/zap)
+- Configuration via [dotenv](https://github.com/joho/godotenv), with hot reloading (including k8s ConfigMap)
+- Others:
+  - gRPC client connection pool
+  - Lightweight SQL builder
+  - Simple Redis-based distributed lock
+  - Simple websocket wrapper (with authorization support)
+  - Simple single timing wheel (supports one-shot and retried tasks)
+  - Practical helpers for http, crypto, date, IP, validator, version compare, etc.
+
+## Installation
+
+```sh
+go get -u github.com/shenghui0779/yiigo
+```
+
+## Usage
+
+#### ENV
+
+- load
+
+```go
+// Loads the `.env` file in the current directory by default
+yiigo.LoadEnv()
+
+// Load a specific env file
+yiigo.LoadEnv(yiigo.WithEnvFile("mycfg.env"))
+
+// Hot reloading
+yiigo.LoadEnv(yiigo.WithEnvWatcher(func(e fsnotify.Event) {
+	fmt.Println(e.String())
+}))
+```
+
+- `.env`
+
+```sh
+ENV=dev
+```
+
+- usage
+
+```go
+fmt.Println(os.Getenv("ENV"))
+// output: dev
+```
+
+#### DB
+
+- register
+
+```go
+yiigo.Init(
+	yiigo.WithMySQL(yiigo.Default, &yiigo.DBConfig{
+		DSN: "dsn",
+		Options: &yiigo.DBOptions{
+			MaxOpenConns:    20,
+			MaxIdleConns:    10,
+			ConnMaxLifetime: 10 * time.Minute,
+			ConnMaxIdleTime: 5 * time.Minute,
+		},
+	}),
+
+	yiigo.WithMySQL("other", &yiigo.DBConfig{
+		DSN: "dsn",
+		Options: &yiigo.DBOptions{
+			MaxOpenConns:    20,
+			MaxIdleConns:    10,
+			ConnMaxLifetime: 10 * time.Minute,
+			ConnMaxIdleTime: 5 * time.Minute,
+		},
+	}),
+)
+```
+
+- sqlx
+
+```go
+// default db
+yiigo.DB().Get(&User{}, "SELECT * FROM user WHERE id = ?", 1)
+
+// other db
+yiigo.DB("other").Get(&User{}, "SELECT * FROM user WHERE id = ?", 1)
+```
+
+- ent
+
+```go
+import "<your_project>/ent" // path to your generated ent package
+
+// default driver
+client := ent.NewClient(ent.Driver(yiigo.EntDriver()))
+
+// other driver
+client := ent.NewClient(ent.Driver(yiigo.EntDriver("other")))
+```
+
+#### MongoDB
+
+```go
+// register
+yiigo.Init(
+	yiigo.WithMongo(yiigo.Default, "dsn"),
+	yiigo.WithMongo("other", "dsn"),
+)
+
+// default mongodb
+yiigo.Mongo().Database("test").Collection("numbers").InsertOne(context.Background(), bson.M{"name": "pi", "value": 3.14159})
+
+// other mongodb
+yiigo.Mongo("other").Database("test").Collection("numbers").InsertOne(context.Background(), bson.M{"name": "pi", "value": 3.14159})
+```
+
+#### Redis
+
+```go
+// register
+yiigo.Init(
+	yiigo.WithRedis(yiigo.Default, &yiigo.RedisConfig{
+		Addr: "addr",
+		Options: &yiigo.RedisOptions{
+			ConnTimeout:  10 * time.Second,
+			ReadTimeout:  10 * time.Second,
+			WriteTimeout: 10 * time.Second,
+			PoolSize:     10,
+			IdleTimeout:  5 * time.Minute,
+		},
+	}),
+
+	yiigo.WithRedis("other", &yiigo.RedisConfig{
+		Addr: "addr",
+		Options: &yiigo.RedisOptions{
+			ConnTimeout:  10 * time.Second,
+			ReadTimeout:  10 * time.Second,
+			WriteTimeout: 10 * time.Second,
+			PoolSize:     10,
+			IdleTimeout:  5 * time.Minute,
+		},
+	}),
+)
+
+// default redis
+conn, err := yiigo.Redis().Get(context.Background())
+
+if err != nil {
+	log.Fatal(err)
+}
+
+defer yiigo.Redis().Put(conn)
+
+conn.Do("SET", "test_key", "hello world")
+
+// other redis
+conn, err := yiigo.Redis("other").Get(context.Background())
+
+if err != nil {
+	log.Fatal(err)
+}
+
+defer yiigo.Redis("other").Put(conn)
+
+conn.Do("SET", "test_key", "hello world")
+```
+
+#### Logger
+
+```go
+// register
+yiigo.Init(
+	yiigo.WithLogger(yiigo.Default, yiigo.LoggerConfig{
+		Filename: "filename",
+		Options: &yiigo.LoggerOptions{
+			Stderr: true,
+		},
+	}),
+
+	yiigo.WithLogger("other", yiigo.LoggerConfig{
+		Filename: "filename",
+		Options: &yiigo.LoggerOptions{
+			Stderr: true,
+		},
+	}),
+)
+
+// default logger
+yiigo.Logger().Info("hello world")
+
+// other logger
+yiigo.Logger("other").Info("hello world")
+```
+
+#### gRPC Pool
+
+```go
+// create pool
+pool := yiigo.NewGrpcPool(&yiigo.GrpcPoolConfig{
+	Dialer: func() (*grpc.ClientConn, error) {
+		return grpc.DialContext(context.Background(), "target",
+			grpc.WithInsecure(),
+			grpc.WithBlock(),
+			grpc.WithKeepaliveParams(keepalive.ClientParameters{
+				Time:    time.Second * 30,
+				Timeout: time.Second * 10,
+			}),
+		)
+	},
+	Options: &yiigo.PoolOptions{
+		PoolSize:    10,
+		IdleTimeout: 5 * time.Minute,
+	},
+})
+
+// use pool
+conn, err := pool.Get(context.Background())
+
+if err != nil {
+	return err
+}
+
+defer pool.Put(conn)
+
+// coding...
+```
+
+#### HTTP
+
+```go
+// default client
+yiigo.HTTPGet(context.Background(), "URL")
+
+// new client with your own *http.Client
+client := yiigo.NewHTTPClient(&http.Client{})
+client.Do(context.Background(), http.MethodGet, "URL", nil)
+
+// upload
+form := yiigo.NewUploadForm(
+	yiigo.WithFormField("title", "TITLE"),
+	yiigo.WithFormField("description", "DESCRIPTION"),
+	yiigo.WithFormFile("media", "demo.mp4", func(w io.Writer) error {
+		f, err := os.Open("demo.mp4")
+
+		if err != nil {
+			return err
+		}
+
+		defer f.Close()
+
+		if _, err = io.Copy(w, f); err != nil {
+			return err
+		}
+
+		return nil
+	}),
+)
+
+yiigo.HTTPUpload(context.Background(), "URL", form)
+```
+
+#### SQL Builder
+
+> 😊 Generates SQL statements for you if you prefer not to write them by hand, for use with the corresponding `sqlx` methods;
+>
+> ⚠️ As a helper it currently supports a limited feature set; complex SQL (e.g. subqueries) still has to be written by hand
+
+```go
+builder := yiigo.NewMySQLBuilder()
+// builder := yiigo.NewSQLBuilder(yiigo.MySQL)
+```
+
+- Query
+
+```go
+ctx := context.Background()
+
+builder.Wrap(
+	yiigo.Table("user"),
+	yiigo.Where("id = ?", 1),
+).ToQuery(ctx)
+// SELECT * FROM user WHERE id = ?
+// [1]
+
+builder.Wrap(
+	yiigo.Table("user"),
+	yiigo.Where("name = ? AND age > ?", "shenghui0779", 20),
+).ToQuery(ctx)
+// SELECT * FROM user WHERE name = ? AND age > ?
+// [shenghui0779 20]
+
+builder.Wrap(
+	yiigo.Table("user"),
+	yiigo.WhereIn("age IN (?)", []int{20, 30}),
+).ToQuery(ctx)
+// SELECT * FROM user WHERE age IN (?, ?)
+// [20 30]
+
+builder.Wrap(
+	yiigo.Table("user"),
+	yiigo.Select("id", "name", "age"),
+	yiigo.Where("id = ?", 1),
+).ToQuery(ctx)
+// SELECT id, name, age FROM user WHERE id = ?
+// [1]
+
+builder.Wrap(
+	yiigo.Table("user"),
+	yiigo.Distinct("name"),
+	yiigo.Where("id = ?", 1),
+).ToQuery(ctx)
+// SELECT DISTINCT name FROM user WHERE id = ?
+// [1] + +builder.Wrap( + yiigo.Table("user"), + yiigo.LeftJoin("address", "user.id = address.user_id"), + yiigo.Where("user.id = ?", 1), +).ToQuery(ctx) +// SELECT * FROM user LEFT JOIN address ON user.id = address.user_id WHERE user.id = ? +// [1] + +builder.Wrap( + yiigo.Table("address"), + yiigo.Select("user_id", "COUNT(*) AS total"), + yiigo.GroupBy("user_id"), + yiigo.Having("user_id = ?", 1), +).ToQuery(ctx) +// SELECT user_id, COUNT(*) AS total FROM address GROUP BY user_id HAVING user_id = ? +// [1] + +builder.Wrap( + yiigo.Table("user"), + yiigo.Where("age > ?", 20), + yiigo.OrderBy("age ASC", "id DESC"), + yiigo.Offset(5), + yiigo.Limit(10), +).ToQuery(ctx) +// SELECT * FROM user WHERE age > ? ORDER BY age ASC, id DESC LIMIT ? OFFSET ? +// [20, 10, 5] + +wrap1 := builder.Wrap( + Table("user_1"), + Where("id = ?", 2), +) + +builder.Wrap( + Table("user_0"), + Where("id = ?", 1), + Union(wrap1), +).ToQuery(ctx) +// (SELECT * FROM user_0 WHERE id = ?) UNION (SELECT * FROM user_1 WHERE id = ?) +// [1, 2] + +builder.Wrap( + Table("user_0"), + Where("id = ?", 1), + UnionAll(wrap1), +).ToQuery(ctx) +// (SELECT * FROM user_0 WHERE id = ?) UNION ALL (SELECT * FROM user_1 WHERE id = ?) +// [1, 2] + +builder.Wrap( + Table("user_0"), + WhereIn("age IN (?)", []int{10, 20}), + Limit(5), + Union( + builder.Wrap( + Table("user_1"), + Where("age IN (?)", []int{30, 40}), + Limit(5), + ), + ), +).ToQuery(ctx) +// (SELECT * FROM user_0 WHERE age IN (?, ?) LIMIT ?) UNION (SELECT * FROM user_1 WHERE age IN (?, ?) LIMIT ?) +// [10, 20, 5, 30, 40, 5] +``` + +- Insert + +```go +ctx := context.Background() + +type User struct { + ID int `db:"-"` + Name string `db:"name"` + Age int `db:"age"` + Phone string `db:"phone,omitempty"` +} + +builder.Wrap(Table("user")).ToInsert(ctx, &User{ + Name: "yiigo", + Age: 29, +}) +// INSERT INTO user (name, age) VALUES (?, ?) +// [yiigo 29] + +builder.Wrap(yiigo.Table("user")).ToInsert(ctx, yiigo.X{ + "name": "yiigo", + "age": 29, +}) +// INSERT INTO user (name, age) VALUES (?, ?) +// [yiigo 29] +``` + +- Batch Insert + +```go +ctx := context.Background() + +type User struct { + ID int `db:"-"` + Name string `db:"name"` + Age int `db:"age"` + Phone string `db:"phone,omitempty"` +} + +builder.Wrap(Table("user")).ToBatchInsert(ctx, []*User{ + { + Name: "shenghui0779", + Age: 20, + }, + { + Name: "yiigo", + Age: 29, + }, +}) +// INSERT INTO user (name, age) VALUES (?, ?), (?, ?) +// [shenghui0779 20 yiigo 29] + +builder.Wrap(yiigo.Table("user")).ToBatchInsert(ctx, []yiigo.X{ + { + "name": "shenghui0779", + "age": 20, + }, + { + "name": "yiigo", + "age": 29, + }, +}) +// INSERT INTO user (name, age) VALUES (?, ?), (?, ?) +// [shenghui0779 20 yiigo 29] +``` + +- Update + +```go +ctx := context.Background() + +type User struct { + Name string `db:"name"` + Age int `db:"age"` + Phone string `db:"phone,omitempty"` +} + +builder.Wrap( + Table("user"), + Where("id = ?", 1), +).ToUpdate(ctx, &User{ + Name: "yiigo", + Age: 29, +}) +// UPDATE user SET name = ?, age = ? WHERE id = ? +// [yiigo 29 1] + +builder.Wrap( + yiigo.Table("user"), + yiigo.Where("id = ?", 1), +).ToUpdate(ctx, yiigo.X{ + "name": "yiigo", + "age": 29, +}) +// UPDATE user SET name = ?, age = ? WHERE id = ? +// [yiigo 29 1] + +builder.Wrap( + yiigo.Table("product"), + yiigo.Where("id = ?", 1), +).ToUpdate(ctx, yiigo.X{ + "price": yiigo.Clause("price * ? + ?", 2, 100), +}) +// UPDATE product SET price = price * ? + ? WHERE id = ? 
+// [2 100 1] +``` + +- Delete + +```go +ctx := context.Background() + +builder.Wrap( + yiigo.Table("user"), + yiigo.Where("id = ?", 1), +).ToDelete(ctx) +// DELETE FROM user WHERE id = ? +// [1] + +builder.Wrap(Table("user")).ToTruncate(ctx) +// TRUNCATE user +``` + +## Documentation + +- [API Reference](https://pkg.go.dev/github.com/shenghui0779/yiigo) +- [Example](https://github.com/shenghui0779/tplgo) + +**Enjoy 😊** diff --git a/vendor/github.com/shenghui0779/yiigo/crypto.go b/vendor/github.com/shenghui0779/yiigo/crypto.go new file mode 100644 index 00000000..a970e5bc --- /dev/null +++ b/vendor/github.com/shenghui0779/yiigo/crypto.go @@ -0,0 +1,701 @@ +package yiigo + +import ( + "bytes" + "crypto" + "crypto/aes" + "crypto/cipher" + "crypto/rand" + "crypto/rsa" + "crypto/sha1" + "crypto/sha256" + "crypto/x509" + "encoding/pem" + "errors" +) + +// PaddingMode aes padding mode +type PaddingMode string + +const ( + // ZERO zero padding mode + ZERO PaddingMode = "ZERO" + // PKCS5 PKCS#5 padding mode + PKCS5 PaddingMode = "PKCS#5" + // PKCS7 PKCS#7 padding mode + PKCS7 PaddingMode = "PKCS#7" +) + +// PemBlockType pem block type which taken from the preamble. +type PemBlockType string + +const ( + // RSAPKCS1 private key in PKCS#1 + RSAPKCS1 PemBlockType = "RSA PRIVATE KEY" + // RSAPKCS8 private key in PKCS#8 + RSAPKCS8 PemBlockType = "PRIVATE KEY" +) + +// AESCrypto is the interface for aes crypto. +type AESCrypto interface { + // Encrypt encrypts the plain text. + Encrypt(plainText []byte) ([]byte, error) + + // Decrypt decrypts the cipher text. + Decrypt(cipherText []byte) ([]byte, error) +} + +type cbccrypto struct { + key []byte + iv []byte + mode PaddingMode +} + +func (c *cbccrypto) Encrypt(plainText []byte) ([]byte, error) { + block, err := aes.NewCipher(c.key) + + if err != nil { + return nil, err + } + + if len(c.iv) != block.BlockSize() { + return nil, errors.New("IV length must equal block size") + } + + switch c.mode { + case ZERO: + plainText = ZeroPadding(plainText, block.BlockSize()) + case PKCS5: + plainText = PKCS5Padding(plainText, block.BlockSize()) + case PKCS7: + plainText = PKCS5Padding(plainText, len(c.key)) + } + + cipherText := make([]byte, len(plainText)) + + blockMode := cipher.NewCBCEncrypter(block, c.iv) + blockMode.CryptBlocks(cipherText, plainText) + + return cipherText, nil +} + +func (c *cbccrypto) Decrypt(cipherText []byte) ([]byte, error) { + block, err := aes.NewCipher(c.key) + + if err != nil { + return nil, err + } + + if len(c.iv) != block.BlockSize() { + return nil, errors.New("IV length must equal block size") + } + + plainText := make([]byte, len(cipherText)) + + blockMode := cipher.NewCBCDecrypter(block, c.iv) + blockMode.CryptBlocks(plainText, cipherText) + + switch c.mode { + case ZERO: + plainText = ZeroUnPadding(plainText) + case PKCS5: + plainText = PKCS5Unpadding(plainText, block.BlockSize()) + case PKCS7: + plainText = PKCS5Unpadding(plainText, len(c.key)) + } + + return plainText, nil +} + +// NewCBCCrypto returns a new aes-cbc crypto. 
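+// With ZERO and PKCS5 the plain text is padded to the cipher's block size;
+// with PKCS7 this implementation pads to the key length. The IV length must
+// equal the AES block size (16 bytes).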
+func NewCBCCrypto(key, iv []byte, mode PaddingMode) AESCrypto { + return &cbccrypto{ + key: key, + iv: iv, + mode: mode, + } +} + +type ecbcrypto struct { + key []byte + mode PaddingMode +} + +func (c *ecbcrypto) Encrypt(plainText []byte) ([]byte, error) { + block, err := aes.NewCipher(c.key) + + if err != nil { + return nil, err + } + + switch c.mode { + case ZERO: + plainText = ZeroPadding(plainText, block.BlockSize()) + case PKCS5: + plainText = PKCS5Padding(plainText, block.BlockSize()) + case PKCS7: + plainText = PKCS5Padding(plainText, len(c.key)) + } + + cipherText := make([]byte, len(plainText)) + + blockMode := NewECBEncrypter(block) + blockMode.CryptBlocks(cipherText, plainText) + + return cipherText, nil +} + +func (c *ecbcrypto) Decrypt(cipherText []byte) ([]byte, error) { + block, err := aes.NewCipher(c.key) + + if err != nil { + return nil, err + } + + plainText := make([]byte, len(cipherText)) + + blockMode := NewECBDecrypter(block) + blockMode.CryptBlocks(plainText, cipherText) + + switch c.mode { + case ZERO: + plainText = ZeroUnPadding(plainText) + case PKCS5: + plainText = PKCS5Unpadding(plainText, block.BlockSize()) + case PKCS7: + plainText = PKCS5Unpadding(plainText, len(c.key)) + } + + return plainText, nil +} + +// NewECBCrypto returns a new aes-ecb crypto. +func NewECBCrypto(key []byte, mode PaddingMode) AESCrypto { + return &ecbcrypto{ + key: key, + mode: mode, + } +} + +type cfbcrypto struct { + key []byte + iv []byte +} + +func (c *cfbcrypto) Encrypt(plainText []byte) ([]byte, error) { + block, err := aes.NewCipher(c.key) + + if err != nil { + return nil, err + } + + if len(c.iv) != block.BlockSize() { + return nil, errors.New("IV length must equal block size") + } + + cipherText := make([]byte, len(plainText)) + + stream := cipher.NewCFBEncrypter(block, c.iv) + stream.XORKeyStream(cipherText, plainText) + + return cipherText, nil +} + +func (c *cfbcrypto) Decrypt(cipherText []byte) ([]byte, error) { + block, err := aes.NewCipher(c.key) + + if err != nil { + return nil, err + } + + if len(c.iv) != block.BlockSize() { + return nil, errors.New("IV length must equal block size") + } + + plainText := make([]byte, len(cipherText)) + + stream := cipher.NewCFBDecrypter(block, c.iv) + stream.XORKeyStream(plainText, cipherText) + + return plainText, nil +} + +// NewCFBCrypto returns a new aes-cfb crypto +func NewCFBCrypto(key, iv []byte) AESCrypto { + return &cfbcrypto{ + key: key, + iv: iv, + } +} + +type ofbcrypto struct { + key []byte + iv []byte +} + +func (c *ofbcrypto) Encrypt(plainText []byte) ([]byte, error) { + block, err := aes.NewCipher(c.key) + + if err != nil { + return nil, err + } + + if len(c.iv) != block.BlockSize() { + return nil, errors.New("IV length must equal block size") + } + + cipherText := make([]byte, len(plainText)) + + stream := cipher.NewOFB(block, c.iv) + stream.XORKeyStream(cipherText, plainText) + + return cipherText, nil +} + +func (c *ofbcrypto) Decrypt(cipherText []byte) ([]byte, error) { + block, err := aes.NewCipher(c.key) + + if err != nil { + return nil, err + } + + if len(c.iv) != block.BlockSize() { + return nil, errors.New("IV length must equal block size") + } + + plainText := make([]byte, len(cipherText)) + + stream := cipher.NewOFB(block, c.iv) + stream.XORKeyStream(plainText, cipherText) + + return plainText, nil +} + +// NewOFBCrypto returns a new aes-ofb crypto +func NewOFBCrypto(key, iv []byte) AESCrypto { + return &ofbcrypto{ + key: key, + iv: iv, + } +} + +type ctrcrypto struct { + key []byte + iv []byte +} + +func (c 
*ctrcrypto) Encrypt(plainText []byte) ([]byte, error) { + block, err := aes.NewCipher(c.key) + + if err != nil { + return nil, err + } + + if len(c.iv) != block.BlockSize() { + return nil, errors.New("IV length must equal block size") + } + + cipherText := make([]byte, len(plainText)) + + stream := cipher.NewCTR(block, c.iv) + stream.XORKeyStream(cipherText, plainText) + + return cipherText, nil +} + +func (c *ctrcrypto) Decrypt(cipherText []byte) ([]byte, error) { + block, err := aes.NewCipher(c.key) + + if err != nil { + return nil, err + } + + if len(c.iv) != block.BlockSize() { + return nil, errors.New("IV length must equal block size") + } + + plainText := make([]byte, len(cipherText)) + + stream := cipher.NewCTR(block, c.iv) + stream.XORKeyStream(plainText, cipherText) + + return plainText, nil +} + +// NewCTRCrypto returns a new aes-ctr crypto +func NewCTRCrypto(key, iv []byte) AESCrypto { + return &ctrcrypto{ + key: key, + iv: iv, + } +} + +type gcmcrypto struct { + key []byte + nonce []byte +} + +func (c *gcmcrypto) Encrypt(plainText []byte) ([]byte, error) { + block, err := aes.NewCipher(c.key) + + if err != nil { + return nil, err + } + + aesgcm, err := cipher.NewGCM(block) + + if err != nil { + return nil, err + } + + if len(c.nonce) != aesgcm.NonceSize() { + return nil, errors.New("nonce length must equal gcm standard nonce size") + } + + return aesgcm.Seal(nil, c.nonce, plainText, nil), nil +} + +func (c *gcmcrypto) Decrypt(cipherText []byte) ([]byte, error) { + block, err := aes.NewCipher(c.key) + + if err != nil { + return nil, err + } + + aesgcm, err := cipher.NewGCM(block) + + if err != nil { + return nil, err + } + + if len(c.nonce) != aesgcm.NonceSize() { + return nil, errors.New("nonce length must equal gcm standard nonce size") + } + + return aesgcm.Open(nil, c.nonce, cipherText, nil) +} + +// NewGCMCrypto returns a new aes-gcm crypto +func NewGCMCrypto(key, nonce []byte) AESCrypto { + return &gcmcrypto{ + key: key, + nonce: nonce, + } +} + +// GenerateRSAKey returns rsa private and public key. 
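+// The private key is PEM-encoded in PKCS#1 ("RSA PRIVATE KEY") or PKCS#8
+// ("PRIVATE KEY") form according to blockType; the public key is always
+// PEM-encoded in PKIX form with block type "PUBLIC KEY".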
+func GenerateRSAKey(bitSize int, blockType PemBlockType) (privateKey, publicKey []byte, err error) { + prvKey, err := rsa.GenerateKey(rand.Reader, bitSize) + + if err != nil { + return + } + + pkixb, err := x509.MarshalPKIXPublicKey(prvKey.Public()) + + if err != nil { + return + } + + privateBlock := &pem.Block{Type: string(blockType)} + + switch blockType { + case RSAPKCS1: + privateBlock.Bytes = x509.MarshalPKCS1PrivateKey(prvKey) + case RSAPKCS8: + privateBlock.Bytes, err = x509.MarshalPKCS8PrivateKey(prvKey) + + if err != nil { + return + } + } + + privateKey = pem.EncodeToMemory(privateBlock) + + publicKey = pem.EncodeToMemory(&pem.Block{ + Type: "PUBLIC KEY", + Bytes: pkixb, + }) + + return +} + +// RSAEncrypt rsa encrypt with PKCS #1 v1.5 +func RSAEncrypt(plainText, publicKey []byte) ([]byte, error) { + block, _ := pem.Decode(publicKey) + + if block == nil { + return nil, errors.New("invalid rsa public key for pem.Decode") + } + + pubKey, err := x509.ParsePKIXPublicKey(block.Bytes) + + if err != nil { + return nil, err + } + + key, ok := pubKey.(*rsa.PublicKey) + + if !ok { + return nil, errors.New("invalid rsa public key, expects rsa.PublicKey") + } + + return rsa.EncryptPKCS1v15(rand.Reader, key, plainText) +} + +// RSADecrypt rsa decrypt with PKCS #1 v1.5 +func RSADecrypt(cipherText, privateKey []byte) ([]byte, error) { + block, _ := pem.Decode(privateKey) + + if block == nil { + return nil, errors.New("invalid rsa private key for pem.Decode") + } + + var ( + key interface{} + err error + ) + + switch PemBlockType(block.Type) { + case RSAPKCS1: + key, err = x509.ParsePKCS1PrivateKey(block.Bytes) + case RSAPKCS8: + key, err = x509.ParsePKCS8PrivateKey(block.Bytes) + } + + if err != nil { + return nil, err + } + + rsaKey, ok := key.(*rsa.PrivateKey) + + if !ok { + return nil, errors.New("invalid rsa private key, expects rsa.PrivateKey") + } + + return rsa.DecryptPKCS1v15(rand.Reader, rsaKey, cipherText) +} + +// RSAEncryptOEAP rsa encrypt with PKCS #1 OEAP. +func RSAEncryptOEAP(plainText, publicKey []byte) ([]byte, error) { + block, _ := pem.Decode(publicKey) + + if block == nil { + return nil, errors.New("invalid rsa public key for pem.Decode") + } + + pubKey, err := x509.ParsePKIXPublicKey(block.Bytes) + + if err != nil { + return nil, err + } + + key, ok := pubKey.(*rsa.PublicKey) + + if !ok { + return nil, errors.New("invalid rsa public key, expects rsa.PublicKey") + } + + return rsa.EncryptOAEP(sha1.New(), rand.Reader, key, plainText, nil) +} + +// RSADecryptOEAP rsa decrypt with PKCS #1 OEAP. +func RSADecryptOEAP(cipherText, privateKey []byte) ([]byte, error) { + block, _ := pem.Decode(privateKey) + + if block == nil { + return nil, errors.New("invalid rsa private key for pem.Decode") + } + + var ( + key interface{} + err error + ) + + switch PemBlockType(block.Type) { + case RSAPKCS1: + key, err = x509.ParsePKCS1PrivateKey(block.Bytes) + case RSAPKCS8: + key, err = x509.ParsePKCS8PrivateKey(block.Bytes) + } + + if err != nil { + return nil, err + } + + rsaKey, ok := key.(*rsa.PrivateKey) + + if !ok { + return nil, errors.New("invalid rsa private key, expects rsa.PrivateKey") + } + + return rsa.DecryptOAEP(sha1.New(), rand.Reader, rsaKey, cipherText, nil) +} + +// RSASignWithSha256 returns rsa signature with sha256. 
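+// The private key may be PEM-encoded in either PKCS#1 or PKCS#8 form; the
+// signature is RSASSA-PKCS1-v1_5 over the SHA-256 digest of data.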
+func RSASignWithSha256(data, privateKey []byte) ([]byte, error) { + block, _ := pem.Decode(privateKey) + + if block == nil { + return nil, errors.New("invalid rsa private key for pem.Decode") + } + + var ( + key interface{} + err error + ) + + switch PemBlockType(block.Type) { + case RSAPKCS1: + key, err = x509.ParsePKCS1PrivateKey(block.Bytes) + case RSAPKCS8: + key, err = x509.ParsePKCS8PrivateKey(block.Bytes) + } + + if err != nil { + return nil, err + } + + rsaKey, ok := key.(*rsa.PrivateKey) + + if !ok { + return nil, errors.New("invalid rsa private key, expects rsa.PrivateKey") + } + + h := sha256.New() + h.Write(data) + + signature, err := rsa.SignPKCS1v15(rand.Reader, rsaKey, crypto.SHA256, h.Sum(nil)) + + if err != nil { + return nil, err + } + + return signature, nil +} + +// RSAVerifyWithSha256 verifies rsa signature with sha256. +func RSAVerifyWithSha256(data, signature, publicKey []byte) error { + block, _ := pem.Decode(publicKey) + + if block == nil { + return errors.New("invalid rsa public key for pem.Decode") + } + + pubKey, err := x509.ParsePKIXPublicKey(block.Bytes) + + if err != nil { + return err + } + + key, ok := pubKey.(*rsa.PublicKey) + + if !ok { + return errors.New("invalid rsa public key, expects rsa.PublicKey") + } + + hashed := sha256.Sum256(data) + + return rsa.VerifyPKCS1v15(key, crypto.SHA256, hashed[:], signature) +} + +func ZeroPadding(cipherText []byte, blockSize int) []byte { + padding := blockSize - len(cipherText)%blockSize + padText := bytes.Repeat([]byte{0}, padding) + + return append(cipherText, padText...) +} + +func ZeroUnPadding(plainText []byte) []byte { + return bytes.TrimRightFunc(plainText, func(r rune) bool { + return r == rune(0) + }) +} + +func PKCS5Padding(cipherText []byte, blockSize int) []byte { + padding := blockSize - len(cipherText)%blockSize + + if padding == 0 { + padding = blockSize + } + + padText := bytes.Repeat([]byte{byte(padding)}, padding) + + return append(cipherText, padText...) +} + +func PKCS5Unpadding(plainText []byte, blockSize int) []byte { + l := len(plainText) + unpadding := int(plainText[l-1]) + + if unpadding < 1 || unpadding > blockSize { + unpadding = 0 + } + + return plainText[:(l - unpadding)] +} + +// ------------- AES-256-ECB ------------- + +type ecb struct { + b cipher.Block + blockSize int +} + +func newECB(b cipher.Block) *ecb { + return &ecb{ + b: b, + blockSize: b.BlockSize(), + } +} + +type ecbEncrypter ecb + +// NewECBEncrypter returns a BlockMode which encrypts in electronic code book mode, using the given Block. +func NewECBEncrypter(b cipher.Block) cipher.BlockMode { + return (*ecbEncrypter)(newECB(b)) +} + +func (x *ecbEncrypter) BlockSize() int { return x.blockSize } + +func (x *ecbEncrypter) CryptBlocks(dst, src []byte) { + if len(src)%x.blockSize != 0 { + panic("crypto/cipher: input not full blocks") + } + + if len(dst) < len(src) { + panic("crypto/cipher: output smaller than input") + } + + for len(src) > 0 { + x.b.Encrypt(dst, src[:x.blockSize]) + src = src[x.blockSize:] + dst = dst[x.blockSize:] + } +} + +type ecbDecrypter ecb + +// NewECBDecrypter returns a BlockMode which decrypts in electronic code book mode, using the given Block. 
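+// Note that ECB reveals repeating plaintext blocks and provides no integrity
+// protection; prefer an authenticated mode such as GCM (see NewGCMCrypto)
+// unless ECB is required for compatibility.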
+func NewECBDecrypter(b cipher.Block) cipher.BlockMode { + return (*ecbDecrypter)(newECB(b)) +} + +func (x *ecbDecrypter) BlockSize() int { return x.blockSize } + +func (x *ecbDecrypter) CryptBlocks(dst, src []byte) { + if len(src)%x.blockSize != 0 { + panic("crypto/cipher: input not full blocks") + } + + if len(dst) < len(src) { + panic("crypto/cipher: output smaller than input") + } + + for len(src) > 0 { + x.b.Decrypt(dst, src[:x.blockSize]) + + src = src[x.blockSize:] + dst = dst[x.blockSize:] + } +} diff --git a/vendor/github.com/shenghui0779/yiigo/db.go b/vendor/github.com/shenghui0779/yiigo/db.go new file mode 100644 index 00000000..03de8f3c --- /dev/null +++ b/vendor/github.com/shenghui0779/yiigo/db.go @@ -0,0 +1,225 @@ +package yiigo + +import ( + "context" + "database/sql" + "fmt" + "runtime/debug" + "sync" + "time" + + entsql "entgo.io/ent/dialect/sql" + _ "github.com/go-sql-driver/mysql" + "github.com/jmoiron/sqlx" + _ "github.com/lib/pq" + _ "github.com/mattn/go-sqlite3" + "go.uber.org/zap" +) + +// DBDriver db driver +type DBDriver string + +const ( + MySQL DBDriver = "mysql" + Postgres DBDriver = "postgres" + SQLite DBDriver = "sqlite3" +) + +var ( + defaultDB *sqlx.DB + dbmap sync.Map + + defaultEntDriver *entsql.Driver + entmap sync.Map +) + +// DBConfig keeps the settings to setup db connection. +type DBConfig struct { + // DSN data source name + // [-- MySQL] username:password@tcp(localhost:3306)/dbname?timeout=10s&charset=utf8mb4&collation=utf8mb4_general_ci&parseTime=True&loc=Local + // [Postgres] host=localhost port=5432 user=root password=secret dbname=test connect_timeout=10 sslmode=disable + // [- SQLite] file::memory:?cache=shared + DSN string `json:"dsn"` + + // Options optional settings to setup db connection. + Options *DBOptions `json:"options"` +} + +// DBOptions optional settings to setup db connection. +type DBOptions struct { + // MaxOpenConns is the maximum number of open connections to the database. + // Use value -1 for no timeout and 0 for default. + // Default is 20. + MaxOpenConns int `json:"max_open_conns"` + + // MaxIdleConns is the maximum number of connections in the idle connection pool. + // Use value -1 for no timeout and 0 for default. + // Default is 10. + MaxIdleConns int `json:"max_idle_conns"` + + // ConnMaxLifetime is the maximum amount of time a connection may be reused. + // Use value -1 for no timeout and 0 for default. + // Default is 10 minutes. + ConnMaxLifetime time.Duration `json:"conn_max_lifetime"` + + // ConnMaxIdleTime is the maximum amount of time a connection may be idle. + // Use value -1 for no timeout and 0 for default. + // Default is 5 minutes. 
+	ConnMaxIdleTime time.Duration `json:"conn_max_idle_time"`
+}
+
+func (o *DBOptions) rebuild(opt *DBOptions) {
+	if opt.MaxOpenConns > 0 {
+		o.MaxOpenConns = opt.MaxOpenConns
+	} else {
+		if opt.MaxOpenConns == -1 {
+			o.MaxOpenConns = 0
+		}
+	}
+
+	if opt.MaxIdleConns > 0 {
+		o.MaxIdleConns = opt.MaxIdleConns
+	} else {
+		if opt.MaxIdleConns == -1 {
+			o.MaxIdleConns = 0
+		}
+	}
+
+	if opt.ConnMaxLifetime > 0 {
+		o.ConnMaxLifetime = opt.ConnMaxLifetime
+	} else {
+		if opt.ConnMaxLifetime == -1 {
+			o.ConnMaxLifetime = 0
+		}
+	}
+
+	if opt.ConnMaxIdleTime > 0 {
+		o.ConnMaxIdleTime = opt.ConnMaxIdleTime
+	} else {
+		if opt.ConnMaxIdleTime == -1 {
+			o.ConnMaxIdleTime = 0
+		}
+	}
+}
+
+func initDB(name string, driver DBDriver, cfg *DBConfig) {
+	db, err := sql.Open(string(driver), cfg.DSN)
+
+	if err != nil {
+		logger.Panic(fmt.Sprintf("[yiigo] err db.%s open", name), zap.String("dsn", cfg.DSN), zap.Error(err))
+	}
+
+	if err = db.Ping(); err != nil {
+		db.Close()
+
+		logger.Panic(fmt.Sprintf("[yiigo] err db.%s ping", name), zap.String("dsn", cfg.DSN), zap.Error(err))
+	}
+
+	opt := &DBOptions{
+		MaxOpenConns:    20,
+		MaxIdleConns:    10,
+		ConnMaxLifetime: 10 * time.Minute,
+		ConnMaxIdleTime: 5 * time.Minute,
+	}
+
+	if cfg.Options != nil {
+		opt.rebuild(cfg.Options)
+	}
+
+	db.SetMaxOpenConns(opt.MaxOpenConns)
+	db.SetMaxIdleConns(opt.MaxIdleConns)
+	db.SetConnMaxLifetime(opt.ConnMaxLifetime)
+	db.SetConnMaxIdleTime(opt.ConnMaxIdleTime)
+
+	sqlxDB := sqlx.NewDb(db, string(driver))
+	entDriver := entsql.OpenDB(string(driver), db)
+
+	if name == Default {
+		defaultDB = sqlxDB
+		defaultEntDriver = entDriver
+	}
+
+	dbmap.Store(name, sqlxDB)
+	entmap.Store(name, entDriver)
+
+	logger.Info(fmt.Sprintf("[yiigo] db.%s is OK", name))
+}
+
+// DB returns a db.
+func DB(name ...string) *sqlx.DB {
+	if len(name) == 0 || name[0] == Default {
+		if defaultDB == nil {
+			logger.Panic(fmt.Sprintf("[yiigo] unknown db.%s (forgotten configure?)", Default))
+		}
+
+		return defaultDB
+	}
+
+	v, ok := dbmap.Load(name[0])
+
+	if !ok {
+		logger.Panic(fmt.Sprintf("[yiigo] unknown db.%s (forgotten configure?)", name[0]))
+	}
+
+	return v.(*sqlx.DB)
+}
+
+// EntDriver returns an ent dialect.Driver.
+func EntDriver(name ...string) *entsql.Driver {
+	if len(name) == 0 || name[0] == Default {
+		if defaultEntDriver == nil {
+			logger.Panic(fmt.Sprintf("[yiigo] unknown db.%s (forgotten configure?)", Default))
+		}
+
+		return defaultEntDriver
+	}
+
+	v, ok := entmap.Load(name[0])
+
+	if !ok {
+		logger.Panic(fmt.Sprintf("[yiigo] unknown db.%s (forgotten configure?)", name[0]))
+	}
+
+	return v.(*entsql.Driver)
+}
+
+// DBTxHandler is the callback func for a db transaction.
+type DBTxHandler func(ctx context.Context, tx *sqlx.Tx) error
+
+// DBTransaction executes a db transaction with a callback function.
+// The provided context is used until the transaction is committed or rolled back.
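+//
+// A minimal illustrative sketch (assumes a registered default DB):
+//
+//	err := DBTransaction(ctx, DB(), func(ctx context.Context, tx *sqlx.Tx) error {
+//		_, err := tx.ExecContext(ctx, "UPDATE user SET age = ? WHERE id = ?", 30, 1)
+//
+//		return err
+//	})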
+func DBTransaction(ctx context.Context, db *sqlx.DB, callback DBTxHandler) error {
+	tx, err := db.BeginTxx(ctx, nil)
+
+	if err != nil {
+		return err
+	}
+
+	defer func() {
+		if r := recover(); r != nil {
+			logger.Error("[yiigo] tx callback panic", zap.Any("error", r), zap.ByteString("stack", debug.Stack()))
+
+			rollback(tx)
+		}
+	}()
+
+	if err = callback(ctx, tx); err != nil {
+		rollback(tx)
+
+		return err
+	}
+
+	if err = tx.Commit(); err != nil {
+		rollback(tx)
+
+		return err
+	}
+
+	return nil
+}
+
+func rollback(tx *sqlx.Tx) {
+	if err := tx.Rollback(); err != nil && err != sql.ErrTxDone {
+		logger.Error("[yiigo] err db transaction rollback", zap.Error(err))
+	}
+}
diff --git a/vendor/github.com/shenghui0779/yiigo/env.go b/vendor/github.com/shenghui0779/yiigo/env.go
new file mode 100644
index 00000000..8361f207
--- /dev/null
+++ b/vendor/github.com/shenghui0779/yiigo/env.go
@@ -0,0 +1,182 @@
+package yiigo
+
+import (
+	"errors"
+	"os"
+	"path"
+	"path/filepath"
+	"runtime/debug"
+	"strings"
+
+	"github.com/fsnotify/fsnotify"
+	"github.com/joho/godotenv"
+	"go.uber.org/zap"
+)
+
+// EnvOnChangeFunc is the function that runs each time an env change occurs.
+type EnvOnChangeFunc func(e fsnotify.Event)
+
+type environment struct {
+	path    string
+	watcher bool
+	eventFn EnvOnChangeFunc
+}
+
+// EnvOption configures how we set up the env file.
+type EnvOption func(e *environment)
+
+// WithEnvFile specifies the env file.
+func WithEnvFile(filename string) EnvOption {
+	return func(e *environment) {
+		if v := strings.TrimSpace(filename); len(v) != 0 {
+			e.path = filepath.Clean(v)
+		}
+	}
+}
+
+// WithEnvWatcher enables watching and re-reading of the env file.
+func WithEnvWatcher(fn EnvOnChangeFunc) EnvOption {
+	return func(e *environment) {
+		e.watcher = true
+		e.eventFn = fn
+	}
+}
+
+// LoadEnv will read your env file(s) and load them into ENV for this process.
+// It defaults to loading .env in the current path if no filename is specified.
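+//
+// An illustrative sketch (the file name and callback are assumptions):
+//
+//	err := LoadEnv(WithEnvFile("mycfg.env"), WithEnvWatcher(func(e fsnotify.Event) {
+//		fmt.Println(e.String())
+//	}))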
+func LoadEnv(options ...EnvOption) error { + env := &environment{path: ".env"} + + for _, f := range options { + f(env) + } + + filename, err := filepath.Abs(env.path) + + if err != nil { + return err + } + + statEnvFile(filename) + + if err := godotenv.Overload(filename); err != nil { + return err + } + + if env.watcher { + go watchEnvFile(filename, env.eventFn) + } + + return nil +} + +func statEnvFile(filename string) { + _, err := os.Stat(filename) + + if err == nil { + return + } + + if err = os.MkdirAll(path.Dir(filename), 0775); err != nil { + return + } + + f, err := os.OpenFile(filename, os.O_RDWR|os.O_TRUNC|os.O_CREATE, 0775) + + if err != nil { + return + } + + f.Close() +} + +func watchEnvFile(filename string, fn EnvOnChangeFunc) { + defer func() { + if r := recover(); r != nil { + logger.Error("[yiigo] env watcher panic", zap.Any("error", r), zap.String("env_file", filename), zap.ByteString("stack", debug.Stack())) + } + }() + + watcher, err := fsnotify.NewWatcher() + + if err != nil { + logger.Error("[yiigo] err env watcher", zap.Error(err)) + + return + } + + defer watcher.Close() + + done := make(chan error) + defer close(done) + + go func() { + defer func() { + if r := recover(); r != nil { + logger.Error("[yiigo] env watcher panic", zap.Any("error", r), zap.String("env_file", filename), zap.ByteString("stack", debug.Stack())) + } + }() + + realEnvFile, _ := filepath.EvalSymlinks(filename) + createOrWriteMask := fsnotify.Create | fsnotify.Write + + for { + select { + case event, ok := <-watcher.Events: + if !ok { + done <- errors.New("channel(watcher.Events) is closed") + + return + } + + eventFile := filepath.Clean(event.Name) + + if eventFile == filename { + // the env file was created or modified + if event.Op&createOrWriteMask != 0 { + if err := godotenv.Overload(filename); err != nil { + logger.Error("[yiigo] err env reload", zap.Error(err), zap.String("env_file", filename)) + } + + if fn != nil { + fn(event) + } + } else if event.Op&fsnotify.Remove != 0 { + logger.Warn("[yiigo] env file removed", zap.String("env_file", filename)) + } + } else { + currentEnvFile, _ := filepath.EvalSymlinks(filename) + + // the real filename to the env file changed (eg: k8s ConfigMap replacement) + if len(currentEnvFile) != 0 && currentEnvFile != realEnvFile { + realEnvFile = currentEnvFile + + if err := godotenv.Overload(filename); err != nil { + logger.Error("[yiigo] err env reload", zap.Error(err), zap.String("env_file", filename)) + } + + if fn != nil { + fn(event) + } + } + } + case err, ok := <-watcher.Errors: + if !ok { + err = errors.New("channel(watcher.Errors) is closed") + } + + done <- err + + return + } + } + }() + + if err = watcher.Add(path.Dir(filename)); err != nil { + done <- err + } + + err = <-done + + logger.Error("[yiigo] err env watcher", zap.Error(err), zap.String("env_file", filename)) +} diff --git a/vendor/github.com/shenghui0779/yiigo/form.go b/vendor/github.com/shenghui0779/yiigo/form.go new file mode 100644 index 00000000..82695341 --- /dev/null +++ b/vendor/github.com/shenghui0779/yiigo/form.go @@ -0,0 +1,400 @@ +package yiigo + +import ( + "encoding/json" + "errors" + "fmt" + "net/http" + "reflect" + "strconv" + "strings" + "time" +) + +var ( + errUnknownType = errors.New("unknown type") + emptyField = reflect.StructField{} +) + +func ContentType(r *http.Request) string { + content := r.Header.Get("Content-Type") + + for i, char := range content { + if char == ' ' || char == ';' { + return content[:i] + } + } + + return content +} + +func MapQuery(ptr 
interface{}, m map[string][]string) error { + return MapFormByTag(ptr, m, "query") +} + +func MapForm(ptr interface{}, form map[string][]string) error { + return MapFormByTag(ptr, form, "form") +} + +func MapFormByTag(ptr interface{}, form map[string][]string, tag string) error { + // Check if ptr is a map + ptrVal := reflect.ValueOf(ptr) + var pointed interface{} + if ptrVal.Kind() == reflect.Ptr { + ptrVal = ptrVal.Elem() + pointed = ptrVal.Interface() + } + if ptrVal.Kind() == reflect.Map && + ptrVal.Type().Key().Kind() == reflect.String { + if pointed != nil { + ptr = pointed + } + return setFormMap(ptr, form) + } + + return MappingByPtr(ptr, formSource(form), tag) +} + +// setter tries to set value on a walking by fields of a struct +type setter interface { + TrySet(value reflect.Value, field reflect.StructField, key string, opt setOptions) (isSetted bool, err error) +} + +type formSource map[string][]string + +var _ setter = formSource(nil) + +// TrySet tries to set a value by request's form source (like map[string][]string) +func (form formSource) TrySet(value reflect.Value, field reflect.StructField, tagValue string, opt setOptions) (isSetted bool, err error) { + return setByForm(value, field, form, tagValue, opt) +} + +func MappingByPtr(ptr interface{}, setter setter, tag string) error { + _, err := mapping(reflect.ValueOf(ptr), emptyField, setter, tag) + return err +} + +func mapping(value reflect.Value, field reflect.StructField, setter setter, tag string) (bool, error) { + if field.Tag.Get(tag) == "-" { // just ignoring this field + return false, nil + } + + var vKind = value.Kind() + + if vKind == reflect.Ptr { + var isNew bool + vPtr := value + if value.IsNil() { + isNew = true + vPtr = reflect.New(value.Type().Elem()) + } + isSetted, err := mapping(vPtr.Elem(), field, setter, tag) + if err != nil { + return false, err + } + if isNew && isSetted { + value.Set(vPtr) + } + return isSetted, nil + } + + if vKind != reflect.Struct || !field.Anonymous { + ok, err := tryToSetValue(value, field, setter, tag) + if err != nil { + return false, err + } + if ok { + return true, nil + } + } + + if vKind == reflect.Struct { + tValue := value.Type() + + var isSetted bool + for i := 0; i < value.NumField(); i++ { + sf := tValue.Field(i) + if sf.PkgPath != "" && !sf.Anonymous { // unexported + continue + } + ok, err := mapping(value.Field(i), tValue.Field(i), setter, tag) + if err != nil { + return false, err + } + isSetted = isSetted || ok + } + return isSetted, nil + } + return false, nil +} + +type setOptions struct { + isDefaultExists bool + defaultValue string +} + +func tryToSetValue(value reflect.Value, field reflect.StructField, setter setter, tag string) (bool, error) { + var tagValue string + var setOpt setOptions + + tagValue = field.Tag.Get(tag) + tagValue, opts := head(tagValue, ",") + + if tagValue == "" { // default value is FieldName + tagValue = field.Name + } + if tagValue == "" { // when field is "emptyField" variable + return false, nil + } + + var opt string + for len(opts) > 0 { + opt, opts = head(opts, ",") + + if k, v := head(opt, "="); k == "default" { + setOpt.isDefaultExists = true + setOpt.defaultValue = v + } + } + + return setter.TrySet(value, field, tagValue, setOpt) +} + +func setByForm(value reflect.Value, field reflect.StructField, form map[string][]string, tagValue string, opt setOptions) (isSetted bool, err error) { + vs, ok := form[tagValue] + if !ok && !opt.isDefaultExists { + return false, nil + } + + switch value.Kind() { + case reflect.Slice: + if !ok 
{ + vs = []string{opt.defaultValue} + } + return true, setSlice(vs, value, field) + case reflect.Array: + if !ok { + vs = []string{opt.defaultValue} + } + if len(vs) != value.Len() { + return false, fmt.Errorf("%q is not valid value for %s", vs, value.Type().String()) + } + return true, setArray(vs, value, field) + default: + var val string + if !ok { + val = opt.defaultValue + } + + if len(vs) > 0 { + val = vs[0] + } + return true, setWithProperType(val, value, field) + } +} + +func setWithProperType(val string, value reflect.Value, field reflect.StructField) error { + switch value.Kind() { + case reflect.Int: + return setIntField(val, 0, value) + case reflect.Int8: + return setIntField(val, 8, value) + case reflect.Int16: + return setIntField(val, 16, value) + case reflect.Int32: + return setIntField(val, 32, value) + case reflect.Int64: + switch value.Interface().(type) { + case time.Duration: + return setTimeDuration(val, value, field) + } + return setIntField(val, 64, value) + case reflect.Uint: + return setUintField(val, 0, value) + case reflect.Uint8: + return setUintField(val, 8, value) + case reflect.Uint16: + return setUintField(val, 16, value) + case reflect.Uint32: + return setUintField(val, 32, value) + case reflect.Uint64: + return setUintField(val, 64, value) + case reflect.Bool: + return setBoolField(val, value) + case reflect.Float32: + return setFloatField(val, 32, value) + case reflect.Float64: + return setFloatField(val, 64, value) + case reflect.String: + value.SetString(val) + case reflect.Struct: + switch value.Interface().(type) { + case time.Time: + return setTimeField(val, field, value) + } + return json.Unmarshal([]byte(val), value.Addr().Interface()) + case reflect.Map: + return json.Unmarshal([]byte(val), value.Addr().Interface()) + default: + return errUnknownType + } + return nil +} + +func setIntField(val string, bitSize int, field reflect.Value) error { + if val == "" { + val = "0" + } + intVal, err := strconv.ParseInt(val, 10, bitSize) + if err == nil { + field.SetInt(intVal) + } + return err +} + +func setUintField(val string, bitSize int, field reflect.Value) error { + if val == "" { + val = "0" + } + uintVal, err := strconv.ParseUint(val, 10, bitSize) + if err == nil { + field.SetUint(uintVal) + } + return err +} + +func setBoolField(val string, field reflect.Value) error { + if val == "" { + val = "false" + } + boolVal, err := strconv.ParseBool(val) + if err == nil { + field.SetBool(boolVal) + } + return err +} + +func setFloatField(val string, bitSize int, field reflect.Value) error { + if val == "" { + val = "0.0" + } + floatVal, err := strconv.ParseFloat(val, bitSize) + if err == nil { + field.SetFloat(floatVal) + } + return err +} + +func setTimeField(val string, structField reflect.StructField, value reflect.Value) error { + timeFormat := structField.Tag.Get("time_format") + if timeFormat == "" { + timeFormat = time.RFC3339 + } + + switch tf := strings.ToLower(timeFormat); tf { + case "unix", "unixnano": + tv, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return err + } + + d := time.Duration(1) + if tf == "unixnano" { + d = time.Second + } + + t := time.Unix(tv/int64(d), tv%int64(d)) + value.Set(reflect.ValueOf(t)) + return nil + + } + + if val == "" { + value.Set(reflect.ValueOf(time.Time{})) + return nil + } + + l := time.Local + if isUTC, _ := strconv.ParseBool(structField.Tag.Get("time_utc")); isUTC { + l = time.UTC + } + + if locTag := structField.Tag.Get("time_location"); locTag != "" { + loc, err := time.LoadLocation(locTag) + 
if err != nil { + return err + } + l = loc + } + + t, err := time.ParseInLocation(timeFormat, val, l) + if err != nil { + return err + } + + value.Set(reflect.ValueOf(t)) + return nil +} + +func setArray(vals []string, value reflect.Value, field reflect.StructField) error { + for i, s := range vals { + err := setWithProperType(s, value.Index(i), field) + if err != nil { + return err + } + } + return nil +} + +func setSlice(vals []string, value reflect.Value, field reflect.StructField) error { + slice := reflect.MakeSlice(value.Type(), len(vals), len(vals)) + err := setArray(vals, slice, field) + if err != nil { + return err + } + value.Set(slice) + return nil +} + +func setTimeDuration(val string, value reflect.Value, field reflect.StructField) error { + d, err := time.ParseDuration(val) + if err != nil { + return err + } + value.Set(reflect.ValueOf(d)) + return nil +} + +func head(str, sep string) (head string, tail string) { + idx := strings.Index(str, sep) + if idx < 0 { + return str, "" + } + return str[:idx], str[idx+len(sep):] +} + +func setFormMap(ptr interface{}, form map[string][]string) error { + el := reflect.TypeOf(ptr).Elem() + + if el.Kind() == reflect.Slice { + ptrMap, ok := ptr.(map[string][]string) + if !ok { + return errors.New("cannot convert to map slices of strings") + } + for k, v := range form { + ptrMap[k] = v + } + + return nil + } + + ptrMap, ok := ptr.(map[string]string) + if !ok { + return errors.New("cannot convert to map of strings") + } + for k, v := range form { + ptrMap[k] = v[len(v)-1] // pick last + } + + return nil +} diff --git a/vendor/github.com/shenghui0779/yiigo/grpc.go b/vendor/github.com/shenghui0779/yiigo/grpc.go new file mode 100644 index 00000000..226dd4c2 --- /dev/null +++ b/vendor/github.com/shenghui0779/yiigo/grpc.go @@ -0,0 +1,167 @@ +package yiigo + +import ( + "context" + "fmt" + "sync" + "time" + + "github.com/shenghui0779/vitess_pool" + "go.uber.org/zap" + "google.golang.org/grpc" + "google.golang.org/grpc/connectivity" +) + +// GrpcDialer grpc client dial function +type GrpcDialer func() (*grpc.ClientConn, error) + +// GrpcConn grpc client connection resource +type GrpcConn struct { + *grpc.ClientConn +} + +// Close closes the connection resource +func (gc *GrpcConn) Close() { + if err := gc.ClientConn.Close(); err != nil { + logger.Error("[yiigo] err client conn closed", zap.Error(err)) + } +} + +// GrpcPool grpc client pool resource +type GrpcPool interface { + // Get returns a connection resource from the pool. + // Context with timeout can specify the wait timeout for pool. + Get(ctx context.Context) (*GrpcConn, error) + + // Put returns a connection resource to the pool. + Put(gc *GrpcConn) +} + +// GrpcPoolConfig keeps the settings to setup grpc client connection pool. +type GrpcPoolConfig struct { + // Dialer is a function that can be used to create a client connection. + Dialer GrpcDialer `json:"dialer"` + + // Options optional settings to setup grpc client connection pool. + Options *PoolOptions `json:"options"` +} + +// PoolOptions optional settings to setup db connection. +type PoolOptions struct { + // PoolSize is the maximum number of possible resources in the pool. + // Use value -1 for no timeout and 0 for default. + // Default is 10. + PoolSize int `json:"pool_size"` + + // PoolPrefill is the number of resources to be pre-filled in the pool. + // Default is no pre-filled. + PoolPrefill int `json:"pool_prefill"` + + // IdleTimeout is the amount of time after which client closes idle connections. 
+ // Use value -1 for no timeout and 0 for default. + // Default is 5 minutes. + IdleTimeout time.Duration `json:"idle_timeout"` +} + +func (o *PoolOptions) rebuild(opt *PoolOptions) { + if opt.PoolSize > 0 { + o.PoolSize = opt.PoolSize + } + + if opt.PoolPrefill > 0 { + o.PoolPrefill = opt.PoolPrefill + } + + if opt.IdleTimeout > 0 { + o.IdleTimeout = opt.IdleTimeout + } else { + if opt.IdleTimeout == -1 { + o.IdleTimeout = 0 + } + } +} + +type grpcResourcePool struct { + config *GrpcPoolConfig + pool *vitess_pool.ResourcePool + mutex sync.Mutex +} + +func (rp *grpcResourcePool) init() { + rp.mutex.Lock() + defer rp.mutex.Unlock() + + if rp.pool != nil && !rp.pool.IsClosed() { + return + } + + df := func() (vitess_pool.Resource, error) { + conn, err := rp.config.Dialer() + + if err != nil { + return nil, err + } + + return &GrpcConn{conn}, nil + } + + rp.pool = vitess_pool.NewResourcePool(df, rp.config.Options.PoolSize, rp.config.Options.PoolSize, rp.config.Options.IdleTimeout, rp.config.Options.PoolPrefill) +} + +func (rp *grpcResourcePool) Get(ctx context.Context) (*GrpcConn, error) { + if rp.pool.IsClosed() { + rp.init() + } + + resource, err := rp.pool.Get(ctx) + + if err != nil { + return nil, err + } + + rc := resource.(*GrpcConn) + + // If rc is in unexpected state, close and reconnect + if state := rc.GetState(); state == connectivity.TransientFailure || state == connectivity.Shutdown { + logger.Warn(fmt.Sprintf("[yiigo] err pool conn state: %s, reconnect", state.String())) + + conn, err := rp.config.Dialer() + + if err != nil { + rp.pool.Put(rc) + + return nil, err + } + + rc.Close() + + return &GrpcConn{conn}, nil + } + + return rc, nil +} + +func (rp *grpcResourcePool) Put(conn *GrpcConn) { + rp.pool.Put(conn) +} + +// NewGrpcPool returns a new grpc client connection pool. +func NewGrpcPool(cfg *GrpcPoolConfig) GrpcPool { + pool := &grpcResourcePool{ + config: &GrpcPoolConfig{ + Dialer: cfg.Dialer, + Options: &PoolOptions{ + PoolSize: 10, + IdleTimeout: 5 * time.Minute, + }, + }, + } + + if cfg.Options != nil { + pool.config.Options.rebuild(cfg.Options) + } + + pool.init() + + return pool +} diff --git a/vendor/github.com/shenghui0779/yiigo/hash.go b/vendor/github.com/shenghui0779/yiigo/hash.go new file mode 100644 index 00000000..d7f68456 --- /dev/null +++ b/vendor/github.com/shenghui0779/yiigo/hash.go @@ -0,0 +1,108 @@ +package yiigo + +import ( + "crypto/hmac" + "crypto/md5" + "crypto/sha1" + "crypto/sha256" + "crypto/sha512" + "encoding/hex" + "hash" +) + +// HashAlgo hash algorithm +type HashAlgo string + +const ( + AlgoMD5 HashAlgo = "md5" + AlgoSha1 HashAlgo = "sha1" + AlgoSha224 HashAlgo = "sha224" + AlgoSha256 HashAlgo = "sha256" + AlgoSha384 HashAlgo = "sha384" + AlgoSha512 HashAlgo = "sha512" +) + +// MD5 calculates the md5 hash of a string. +func MD5(s string) string { + h := md5.New() + h.Write([]byte(s)) + + return hex.EncodeToString(h.Sum(nil)) +} + +// SHA1 calculates the sha1 hash of a string. +func SHA1(s string) string { + h := sha1.New() + h.Write([]byte(s)) + + return hex.EncodeToString(h.Sum(nil)) +} + +// SHA256 calculates the sha256 hash of a string. +func SHA256(s string) string { + h := sha256.New() + h.Write([]byte(s)) + + return hex.EncodeToString(h.Sum(nil)) +} + +// Hash generates a hash value, expects: MD5, SHA1, SHA224, SHA256, SHA384, SHA512. 
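+// For an unsupported algorithm the input string is returned unchanged, e.g.
+// Hash(AlgoSha256, "hello") is equivalent to SHA256("hello").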
+func Hash(algo HashAlgo, s string) string { + var h hash.Hash + + switch algo { + case AlgoMD5: + h = md5.New() + case AlgoSha1: + h = sha1.New() + case AlgoSha224: + h = sha256.New224() + case AlgoSha256: + h = sha256.New() + case AlgoSha384: + h = sha512.New384() + case AlgoSha512: + h = sha512.New() + default: + return s + } + + h.Write([]byte(s)) + + return hex.EncodeToString(h.Sum(nil)) +} + +// HMacSHA256 generates a keyed sha256 hash value. +func HMacSHA256(s, key string) string { + mac := hmac.New(sha256.New, []byte(key)) + + mac.Write([]byte(s)) + + return hex.EncodeToString(mac.Sum(nil)) +} + +// HMac generates a keyed hash value, expects: MD5, SHA1, SHA224, SHA256, SHA384, SHA512. +func HMac(algo HashAlgo, s, key string) string { + var mac hash.Hash + + switch algo { + case AlgoMD5: + mac = hmac.New(md5.New, []byte(key)) + case AlgoSha1: + mac = hmac.New(sha1.New, []byte(key)) + case AlgoSha224: + mac = hmac.New(sha256.New224, []byte(key)) + case AlgoSha256: + mac = hmac.New(sha256.New, []byte(key)) + case AlgoSha384: + mac = hmac.New(sha512.New384, []byte(key)) + case AlgoSha512: + mac = hmac.New(sha512.New, []byte(key)) + default: + return s + } + + mac.Write([]byte(s)) + + return hex.EncodeToString(mac.Sum(nil)) +} diff --git a/vendor/github.com/shenghui0779/yiigo/helper.go b/vendor/github.com/shenghui0779/yiigo/helper.go new file mode 100644 index 00000000..71b2d4e3 --- /dev/null +++ b/vendor/github.com/shenghui0779/yiigo/helper.go @@ -0,0 +1,260 @@ +package yiigo + +import ( + "encoding/xml" + "net" + "os" + "path" + "path/filepath" + "strings" + "time" + + "github.com/hashicorp/go-version" + "go.uber.org/zap" +) + +var timezone = time.FixedZone("CST", 8*3600) + +const ( + layoutdate = "2006-01-02" + layouttime = "2006-01-02 15:04:05" +) + +const ( + // Default defines for `default` name + Default = "default" + + // OK + OK = "OK" +) + +// X is a convenient alias for a map[string]interface{}. +type X map[string]interface{} + +// CDATA XML CDATA section which is defined as blocks of text that are not parsed by the parser, but are otherwise recognized as markup. +type CDATA string + +// MarshalXML encodes the receiver as zero or more XML elements. +func (c CDATA) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + return e.EncodeElement(struct { + string `xml:",cdata"` + }{string(c)}, start) +} + +// SetTimezone sets timezone for time display. +// The default timezone is GMT+8. +func SetTimezone(loc *time.Location) { + timezone = loc +} + +// Date format a local time/date and +// returns a string formatted according to the given layout using the given timestamp of int64. +// If timestamp < 0, use `time.Now()` to format. +// The default layout is: 2006-01-02 15:04:05. +func Date(timestamp int64, layout ...string) string { + l := layouttime + + if len(layout) != 0 { + l = layout[0] + } + + if timestamp < 0 { + return time.Now().In(timezone).Format(l) + } + + return time.Unix(timestamp, 0).In(timezone).Format(l) +} + +// StrToTime Parse English textual datetime description into a Unix timestamp. +// The default layout is: 2006-01-02 15:04:05 +func StrToTime(datetime string, layout ...string) int64 { + l := layouttime + + if len(layout) != 0 { + l = layout[0] + } + + t, err := time.ParseInLocation(l, datetime, timezone) + + if err != nil { + logger.Error("[yiigo] err parse time", zap.Error(err), zap.String("datetime", datetime), zap.String("layout", l)) + + return 0 + } + + return t.Unix() +} + +// WeekAround returns the monday and sunday of the week for the given time. 
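+// The week is treated as starting on Monday.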
+// The default layout is: 2006-01-02 +func WeekAround(timestamp int64, layout ...string) (monday, sunday string) { + t := time.Unix(timestamp, 0).In(timezone) + + weekday := t.Weekday() + + // monday + offset := int(time.Monday - weekday) + + if offset > 0 { + offset = -6 + } + + today := time.Date(t.Year(), t.Month(), t.Day(), 0, 0, 0, 0, timezone) + + l := layoutdate + + if len(layout) != 0 { + l = layout[0] + } + + monday = today.AddDate(0, 0, offset).Format(l) + + // sunday + offset = int(time.Sunday - weekday) + + if offset < 0 { + offset += 7 + } + + sunday = today.AddDate(0, 0, offset).Format(l) + + return +} + +// IP2Long converts a string containing an (IPv4) Internet Protocol dotted address into an uint32 integer. +func IP2Long(ip string) uint32 { + ipv4 := net.ParseIP(ip).To4() + + if ipv4 == nil { + return 0 + } + + return uint32(ipv4[0])<<24 | uint32(ipv4[1])<<16 | uint32(ipv4[2])<<8 | uint32(ipv4[3]) +} + +// Long2IP converts an uint32 integer address into a string in (IPv4) Internet standard dotted format. +func Long2IP(ip uint32) string { + return net.IPv4(byte(ip>>24), byte(ip>>16), byte(ip>>8), byte(ip)).String() +} + +// AddSlashes returns a string with backslashes added before characters that need to be escaped. +func AddSlashes(s string) string { + var builder strings.Builder + + for _, ch := range s { + if ch == '\'' || ch == '"' || ch == '\\' { + builder.WriteRune('\\') + } + + builder.WriteRune(ch) + } + + return builder.String() +} + +// StripSlashes returns a string with backslashes stripped off. (\' becomes ' and so on.) Double backslashes (\\) are made into a single backslash (\). +func StripSlashes(s string) string { + var builder strings.Builder + + l, skip := len(s), false + + for i, ch := range s { + if skip { + builder.WriteRune(ch) + skip = false + + continue + } + + if ch == '\\' { + if i+1 < l && s[i+1] == '\\' { + skip = true + } + + continue + } + + builder.WriteRune(ch) + } + + return builder.String() +} + +// QuoteMeta returns a version of str with a backslash character (\) before every character that is among these: . \ + * ? [ ^ ] ( $ ) +func QuoteMeta(s string) string { + var builder strings.Builder + + for _, ch := range s { + switch ch { + case '.', '+', '\\', '(', '$', ')', '[', '^', ']', '*', '?': + builder.WriteRune('\\') + } + + builder.WriteRune(ch) + } + + return builder.String() +} + +// CreateFile creates or truncates the named file. +// If the file already exists, it is truncated. +// If the directory or file does not exist, it is created with mode 0775 +func CreateFile(filename string) (*os.File, error) { + abspath, err := filepath.Abs(filename) + + if err != nil { + return nil, err + } + + if err = os.MkdirAll(path.Dir(abspath), 0775); err != nil { + return nil, err + } + + return os.OpenFile(abspath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0775) +} + +// OpenFile opens the named file. +// If the file already exists, appends data to it when writing. +// If the directory or file does not exist, it is created with mode 0775 +func OpenFile(filename string) (*os.File, error) { + abspath, err := filepath.Abs(filename) + + if err != nil { + return nil, err + } + + if err = os.MkdirAll(path.Dir(abspath), 0775); err != nil { + return nil, err + } + + return os.OpenFile(abspath, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0775) +} + +// VersionCompare compares semantic versions range, support: >, >=, =, !=, <, <=, | (or), & (and). 
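+// Constraints joined with & must all hold; alternatives separated by | match if any one holds.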
+// Param `rangeVer` eg: 1.0.0, =1.0.0, >2.0.0, >=1.0.0&<2.0.0, <2.0.0|>3.0.0, !=4.0.4 +func VersionCompare(rangeVer, curVer string) (bool, error) { + semVer, err := version.NewVersion(curVer) + + // invalid semantic version + if err != nil { + return false, err + } + + orVers := strings.Split(rangeVer, "|") + + for _, ver := range orVers { + andVers := strings.Split(ver, "&") + + constraints, err := version.NewConstraint(strings.Join(andVers, ",")) + + if err != nil { + return false, err + } + + if constraints.Check(semVer) { + return true, nil + } + } + + return false, nil +} diff --git a/vendor/github.com/shenghui0779/yiigo/http.go b/vendor/github.com/shenghui0779/yiigo/http.go new file mode 100644 index 00000000..9172e500 --- /dev/null +++ b/vendor/github.com/shenghui0779/yiigo/http.go @@ -0,0 +1,267 @@ +package yiigo + +import ( + "bytes" + "context" + "crypto/tls" + "errors" + "io" + "mime/multipart" + "net" + "net/http" + "net/url" + "time" +) + +// httpSetting http request setting +type httpSetting struct { + headers map[string]string + cookies []*http.Cookie + close bool +} + +// HTTPOption configures how we set up the http request. +type HTTPOption func(s *httpSetting) + +// WithHTTPHeader specifies the header to http request. +func WithHTTPHeader(key, value string) HTTPOption { + return func(s *httpSetting) { + s.headers[key] = value + } +} + +// WithHTTPCookies specifies the cookies to http request. +func WithHTTPCookies(cookies ...*http.Cookie) HTTPOption { + return func(s *httpSetting) { + s.cookies = cookies + } +} + +// WithHTTPClose specifies close the connection after +// replying to this request (for servers) or after sending this +// request and reading its response (for clients). +func WithHTTPClose() HTTPOption { + return func(s *httpSetting) { + s.close = true + } +} + +// UploadForm is the interface for http upload. +type UploadForm interface { + // Write writes fields to multipart writer + Write(w *multipart.Writer) error +} + +// FormFileFunc writes file content to multipart writer. +type FormFileFunc func(w io.Writer) error + +type formfile struct { + fieldname string + filename string + filefunc FormFileFunc +} + +type uploadform struct { + formfiles []*formfile + formfields map[string]string +} + +func (f *uploadform) Write(w *multipart.Writer) error { + if len(f.formfiles) == 0 { + return errors.New("empty file field") + } + + for _, v := range f.formfiles { + part, err := w.CreateFormFile(v.fieldname, v.filename) + + if err != nil { + return err + } + + if err = v.filefunc(part); err != nil { + return err + } + } + + for name, value := range f.formfields { + if err := w.WriteField(name, value); err != nil { + return err + } + } + + return nil +} + +// UploadField configures how we set up the upload from. +type UploadField func(f *uploadform) + +// WithFormFile specifies the file field to upload from. +func WithFormFile(fieldname, filename string, fn FormFileFunc) UploadField { + return func(f *uploadform) { + f.formfiles = append(f.formfiles, &formfile{ + fieldname: fieldname, + filename: filename, + filefunc: fn, + }) + } +} + +// WithFormField specifies the form field to upload from. 
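+// For example, a sketch that pairs a file field with a plain field
+// (field and file names are illustrative):
+//
+//	form := NewUploadForm(
+//		WithFormFile("media", "demo.jpg", func(w io.Writer) error {
+//			f, err := os.Open("demo.jpg")
+//			if err != nil {
+//				return err
+//			}
+//			defer f.Close()
+//
+//			_, err = io.Copy(w, f)
+//			return err
+//		}),
+//		WithFormField("description", "a demo image"),
+//	)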
+func WithFormField(fieldname, fieldvalue string) UploadField { + return func(u *uploadform) { + u.formfields[fieldname] = fieldvalue + } +} + +// NewUploadForm returns an upload form +func NewUploadForm(fields ...UploadField) UploadForm { + form := &uploadform{ + formfiles: make([]*formfile, 0), + formfields: make(map[string]string), + } + + for _, f := range fields { + f(form) + } + + return form +} + +// HTTPClient is the interface for a http client. +type HTTPClient interface { + // Do sends an HTTP request and returns an HTTP response. + // Should use context to specify the timeout for request. + Do(ctx context.Context, method, reqURL string, body []byte, options ...HTTPOption) (*http.Response, error) + + // Upload issues a UPLOAD to the specified URL. + // Should use context to specify the timeout for request. + Upload(ctx context.Context, reqURL string, form UploadForm, options ...HTTPOption) (*http.Response, error) +} + +type httpclient struct { + client *http.Client +} + +func (c *httpclient) Do(ctx context.Context, method, reqURL string, body []byte, options ...HTTPOption) (*http.Response, error) { + req, err := http.NewRequestWithContext(ctx, method, reqURL, bytes.NewBuffer(body)) + + if err != nil { + return nil, err + } + + setting := new(httpSetting) + + if len(options) != 0 { + setting.headers = make(map[string]string) + + for _, f := range options { + f(setting) + } + } + + // headers + if len(setting.headers) != 0 { + for k, v := range setting.headers { + req.Header.Set(k, v) + } + } + + // cookies + if len(setting.cookies) != 0 { + for _, v := range setting.cookies { + req.AddCookie(v) + } + } + + if setting.close { + req.Close = true + } + + resp, err := c.client.Do(req) + + if err != nil { + // If the context has been canceled, the context's error is probably more useful. + select { + case <-ctx.Done(): + err = ctx.Err() + default: + } + + return nil, err + } + + return resp, err +} + +func (c *httpclient) Upload(ctx context.Context, reqURL string, form UploadForm, options ...HTTPOption) (*http.Response, error) { + buf := bytes.NewBuffer(make([]byte, 0, 20<<10)) // 20kb + w := multipart.NewWriter(buf) + + if err := form.Write(w); err != nil { + return nil, err + } + + options = append(options, WithHTTPHeader("Content-Type", w.FormDataContentType())) + + // Don't forget to close the multipart writer. + // If you don't close it, your request will be missing the terminating boundary. + if err := w.Close(); err != nil { + return nil, err + } + + return c.Do(ctx, http.MethodPost, reqURL, buf.Bytes(), options...) +} + +// NewHTTPClient returns a new http client +func NewHTTPClient(client *http.Client) HTTPClient { + return &httpclient{ + client: client, + } +} + +// defaultHTTPClient default http client +var defaultHTTPClient = NewHTTPClient(&http.Client{ + Transport: &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 60 * time.Second, + }).DialContext, + TLSClientConfig: &tls.Config{ + InsecureSkipVerify: true, + }, + MaxIdleConns: 0, + MaxIdleConnsPerHost: 1000, + MaxConnsPerHost: 1000, + IdleConnTimeout: 60 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + }, +}) + +// HTTPGet issues a GET to the specified URL. +func HTTPGet(ctx context.Context, reqURL string, options ...HTTPOption) (*http.Response, error) { + return defaultHTTPClient.Do(ctx, http.MethodGet, reqURL, nil, options...) +} + +// HTTPPost issues a POST to the specified URL. 
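+// For example, a sketch of a JSON POST with a request-scoped timeout
+// (URL and payload are illustrative):
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+//	defer cancel()
+//
+//	resp, err := HTTPPost(ctx, "https://example.com/api", []byte(`{"k":"v"}`),
+//		WithHTTPHeader("Content-Type", "application/json"))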
+func HTTPPost(ctx context.Context, reqURL string, body []byte, options ...HTTPOption) (*http.Response, error) { + return defaultHTTPClient.Do(ctx, http.MethodPost, reqURL, body, options...) +} + +// HTTPPostForm issues a POST to the specified URL, with data's keys and values URL-encoded as the request body. +func HTTPPostForm(ctx context.Context, reqURL string, data url.Values, options ...HTTPOption) (*http.Response, error) { + options = append(options, WithHTTPHeader("Content-Type", "application/x-www-form-urlencoded")) + + return defaultHTTPClient.Do(ctx, http.MethodPost, reqURL, []byte(data.Encode()), options...) +} + +// HTTPUpload issues a UPLOAD to the specified URL. +func HTTPUpload(ctx context.Context, reqURL string, form UploadForm, options ...HTTPOption) (*http.Response, error) { + return defaultHTTPClient.Upload(ctx, reqURL, form, options...) +} + +// HTTPDo sends an HTTP request and returns an HTTP response +func HTTPDo(ctx context.Context, method, reqURL string, body []byte, options ...HTTPOption) (*http.Response, error) { + return defaultHTTPClient.Do(ctx, method, reqURL, body, options...) +} diff --git a/vendor/github.com/shenghui0779/yiigo/init.go b/vendor/github.com/shenghui0779/yiigo/init.go new file mode 100644 index 00000000..a27ca7bd --- /dev/null +++ b/vendor/github.com/shenghui0779/yiigo/init.go @@ -0,0 +1,103 @@ +package yiigo + +import ( + "path/filepath" + "strings" + "sync" + + "github.com/gorilla/websocket" +) + +// InitOption configures how we set up the yiigo initialization. +type InitOption func(wg *sync.WaitGroup) + +// WithMySQL register mysql db. +func WithMySQL(name string, cfg *DBConfig) InitOption { + return func(wg *sync.WaitGroup) { + defer wg.Done() + + initDB(name, MySQL, cfg) + } +} + +// WithPostgres register postgres db. +func WithPostgres(name string, cfg *DBConfig) InitOption { + return func(wg *sync.WaitGroup) { + defer wg.Done() + + initDB(name, Postgres, cfg) + } +} + +// WithSQLite register sqlite db. +func WithSQLite(name string, cfg *DBConfig) InitOption { + return func(wg *sync.WaitGroup) { + defer wg.Done() + + initDB(name, SQLite, cfg) + } +} + +// WithMongo register mongodb. +// [DSN] mongodb://localhost:27017/?connectTimeoutMS=10000&minPoolSize=10&maxPoolSize=20&maxIdleTimeMS=60000&readPreference=primary +// [Reference] https://docs.mongodb.com/manual/reference/connection-string +func WithMongo(name string, dsn string) InitOption { + return func(wg *sync.WaitGroup) { + defer wg.Done() + + initMongoDB(name, dsn) + } +} + +// WithRedis register redis. +func WithRedis(name string, cfg *RedisConfig) InitOption { + return func(wg *sync.WaitGroup) { + defer wg.Done() + + initRedis(name, cfg) + } +} + +// WithNSQ initialize nsq. +func WithNSQ(nsqd string, lookupd []string, consumers ...NSQConsumer) InitOption { + return func(wg *sync.WaitGroup) { + defer wg.Done() + + initNSQ(nsqd, lookupd, consumers...) + } +} + +// WithLogger register logger. +func WithLogger(name string, cfg *LoggerConfig) InitOption { + return func(wg *sync.WaitGroup) { + defer wg.Done() + + if v := strings.TrimSpace(cfg.Filename); len(v) != 0 { + cfg.Filename = filepath.Clean(v) + } + + initLogger(name, cfg) + } +} + +// WithWebsocket specifies the websocket upgrader. +func WithWebsocket(upgrader *websocket.Upgrader) InitOption { + return func(wg *sync.WaitGroup) { + defer wg.Done() + + wsupgrader = upgrader + } +} + +// Init yiigo initialization. 
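+// For example, a sketch that registers the default MySQL and Redis instances
+// concurrently (DSN and address are illustrative; assumes DBConfig exposes a
+// DSN field as in the rest of the package):
+//
+//	Init(
+//		WithMySQL(Default, &DBConfig{DSN: "user:pass@tcp(127.0.0.1:3306)/demo"}),
+//		WithRedis(Default, &RedisConfig{Addr: "127.0.0.1:6379"}),
+//	)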
+func Init(options ...InitOption) { + var wg sync.WaitGroup + + for _, f := range options { + wg.Add(1) + + go f(&wg) + } + + wg.Wait() +} diff --git a/vendor/github.com/shenghui0779/yiigo/location.go b/vendor/github.com/shenghui0779/yiigo/location.go new file mode 100644 index 00000000..0da95c7c --- /dev/null +++ b/vendor/github.com/shenghui0779/yiigo/location.go @@ -0,0 +1,309 @@ +package yiigo + +import ( + "fmt" + "math" +) + +// Location geographic location +type Location struct { + lng float64 + lat float64 +} + +// Longtitude returns longtitude +func (l *Location) Longtitude() float64 { + return l.lng +} + +// Latitude returns latitude +func (l *Location) Latitude() float64 { + return l.lat +} + +// String implements Stringer interface for print. +func (l *Location) String() string { + return fmt.Sprintf("(lng: %.16f, lat: %.16f)", l.lng, l.lat) +} + +// Distance calculates distance in meters with target location. +func (l *Location) Distance(t *Location) float64 { + R := 6378137.0 // radius of the earth + rad := math.Pi / 180.0 + + lng1 := l.lng * rad + lat1 := l.lat * rad + + lng2 := t.Longtitude() * rad + lat2 := t.Latitude() * rad + + theta := lng2 - lng1 + + dist := math.Sin(lat1)*math.Sin(lat2) + math.Cos(lat1)*math.Cos(lat2)*math.Cos(theta) + + return math.Acos(dist) * R +} + +// Azimuth calculates azimuth angle with target location. +func (l *Location) Azimuth(t *Location) float64 { + if t.Longtitude() == l.lng && t.Latitude() == l.lat { + return 0 + } + + if t.Longtitude() == l.lng { + if t.Latitude() > l.lat { + return 0 + } + + return 180 + } + + if t.Latitude() == l.lat { + if t.Longtitude() > l.lng { + return 90 + } + + return 270 + } + + rad := math.Pi / 180.0 + + a := (90 - t.Latitude()) * rad + b := (90 - l.lat) * rad + + AOC_BOC := (t.Longtitude() - l.lng) * rad + + cosc := math.Cos(a)*math.Cos(b) + math.Sin(a)*math.Sin(b)*math.Cos(AOC_BOC) + sinc := math.Sqrt(1 - cosc*cosc) + + sinA := math.Sin(a) * math.Sin(AOC_BOC) / sinc + + if sinA > 1 { + sinA = 1 + } + + if sinA < -1 { + sinA = -1 + } + + angle := math.Asin(sinA) / math.Pi * 180 + + if t.Latitude() < l.lat { + return 180 - angle + } + + if t.Longtitude() < l.lng { + return 360 + angle + } + + return angle +} + +// NewLocation returns a new location. +func NewLocation(lng, lat float64) *Location { + return &Location{ + lng: lng, + lat: lat, + } +} + +// Point coordinate point +type Point struct { + x float64 + y float64 + ml float64 +} + +// X returns x +func (p *Point) X() float64 { + return p.x +} + +// Y returns y +func (p *Point) Y() float64 { + return p.y +} + +// MeridianLine returns meridian line for conversion between point and location. +func (p *Point) MeridianLine() float64 { + return p.ml +} + +// String implements Stringer interface for print. +func (p *Point) String() string { + return fmt.Sprintf("(x: %.16f, y: %.16f)", p.x, p.y) +} + +// PointOption point option +type PointOption func(p *Point) + +// WithMeridianLine specifies the meridian line for point. +func WithMeridianLine(ml float64) PointOption { + return func(p *Point) { + p.ml = ml + } +} + +// NewPoint returns a new point. +func NewPoint(x, y float64, options ...PointOption) *Point { + p := &Point{ + x: x, + y: y, + } + + for _, f := range options { + f(p) + } + + return p +} + +// EllipsoidParameter params for ellipsoid. +type EllipsoidParameter struct { + A float64 + B float64 + F float64 + E2 float64 + EP2 float64 + C float64 + A0 float64 + A2 float64 + A4 float64 + A6 float64 +} + +// NewWGS84Parameter params for WGS84. 
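+// For example, a round-trip sketch using it as the ellipsoid for the
+// coordinate transformer below (coordinates are illustrative):
+//
+//	zgct := NewZtGeoCoordTransform(-360, 'g', NewWGS84Parameter())
+//	pt := zgct.BL2XY(NewLocation(116.3975, 39.9085)) // lng/lat -> plane point
+//	loc := zgct.XY2BL(pt)                            // plane point -> lng/lat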
+func NewWGS84Parameter() *EllipsoidParameter { + ep := &EllipsoidParameter{ + A: 6378137.0, + E2: 0.00669437999013, + } + + ep.B = math.Sqrt(ep.A * ep.A * (1 - ep.E2)) + ep.EP2 = (ep.A*ep.A - ep.B*ep.B) / (ep.B * ep.B) + ep.F = (ep.A - ep.B) / ep.A + + // f0 := 1 / 298.257223563; + // f1 := 1 / ep.F; + + ep.C = ep.A / (1 - ep.F) + + m0 := ep.A * (1 - ep.E2) + m2 := 1.5 * ep.E2 * m0 + m4 := 1.25 * ep.E2 * m2 + m6 := 7 * ep.E2 * m4 / 6 + m8 := 9 * ep.E2 * m6 / 8 + + ep.A0 = m0 + m2/2 + 3*m4/8 + 5*m6/16 + 35*m8/128 + ep.A2 = m2/2 + m4/2 + 15*m6/32 + 7*m8/16 + ep.A4 = m4/8 + 3*m6/16 + 7*m8/32 + ep.A6 = m6/32 + m8/16 + + return ep +} + +// ZtGeoCoordTransform 经纬度与大地平面直角坐标系间的转换 +type ZtGeoCoordTransform struct { + ep *EllipsoidParameter + meridianLine float64 + projType rune +} + +// NewZtGeoCoordTransform 返回经纬度与大地平面直角坐标系间的转换器 +// eg: zgct := yiigo.NewZtGeoCoordTransform(-360, 'g', NewWGS84Parameter()) +func NewZtGeoCoordTransform(ml float64, pt rune, ep *EllipsoidParameter) *ZtGeoCoordTransform { + return &ZtGeoCoordTransform{ + ep: ep, + meridianLine: ml, + projType: pt, + } +} + +// BL2XY 经纬度转大地平面直角坐标系点 +func (zt *ZtGeoCoordTransform) BL2XY(loc *Location) *Point { + meridianLine := zt.meridianLine + + if meridianLine < -180 { + meridianLine = float64(int((loc.Longtitude()+1.5)/3) * 3) + } + + lat := loc.Latitude() * 0.0174532925199432957692 + dL := (loc.Longtitude() - meridianLine) * 0.0174532925199432957692 + + X := zt.ep.A0*lat - zt.ep.A2*math.Sin(2*lat)/2 + zt.ep.A4*math.Sin(4*lat)/4 - zt.ep.A6*math.Sin(6*lat)/6 + + tn := math.Tan(lat) + tn2 := tn * tn + tn4 := tn2 * tn2 + + j2 := (1/math.Pow(1-zt.ep.F, 2) - 1) * math.Pow(math.Cos(lat), 2) + n := zt.ep.A / math.Sqrt(1.0-zt.ep.E2*math.Sin(lat)*math.Sin(lat)) + + var temp [6]float64 + + temp[0] = n * math.Sin(lat) * math.Cos(lat) * dL * dL / 2 + temp[1] = n * math.Sin(lat) * math.Pow(math.Cos(lat), 3) * (5 - tn2 + 9*j2 + 4*j2*j2) * math.Pow(dL, 4) / 24 + temp[2] = n * math.Sin(lat) * math.Pow(math.Cos(lat), 5) * (61 - 58*tn2 + tn4) * math.Pow(dL, 6) / 720 + temp[3] = n * math.Cos(lat) * dL + temp[4] = n * math.Pow(math.Cos(lat), 3) * (1 - tn2 + j2) * math.Pow(dL, 3) / 6 + temp[5] = n * math.Pow(math.Cos(lat), 5) * (5 - 18*tn2 + tn4 + 14*j2 - 58*tn2*j2) * math.Pow(dL, 5) / 120 + + px := temp[3] + temp[4] + temp[5] + py := X + temp[0] + temp[1] + temp[2] + + switch zt.projType { + case 'g': + px += 500000 + case 'u': + px = px*0.9996 + 500000 + py = py * 0.9996 + } + + return NewPoint(px, py, WithMeridianLine(meridianLine)) +} + +// XY2BL 大地平面直角坐标系点转经纬度 +func (zt *ZtGeoCoordTransform) XY2BL(p *Point) *Location { + x := p.X() - 500000 + y := p.Y() + + if zt.projType == 'u' { + x = x / 0.9996 + y = y / 0.9996 + } + + var ( + bf0 = y / zt.ep.A0 + bf float64 + threshould = 1.0 + ) + + for threshould > 0.00000001 { + y0 := -zt.ep.A2*math.Sin(2*bf0)/2 + zt.ep.A4*math.Sin(4*bf0)/4 - zt.ep.A6*math.Sin(6*bf0)/6 + bf = (y - y0) / zt.ep.A0 + + threshould = bf - bf0 + bf0 = bf + } + + t := math.Tan(bf) + j2 := zt.ep.EP2 * math.Pow(math.Cos(bf), 2) + + v := math.Sqrt(1 - zt.ep.E2*math.Sin(bf)*math.Sin(bf)) + n := zt.ep.A / v + m := zt.ep.A * (1 - zt.ep.E2) / math.Pow(v, 3) + + temp0 := t * x * x / (2 * m * n) + temp1 := t * (5 + 3*t*t + j2 - 9*j2*t*t) * math.Pow(x, 4) / (24 * m * math.Pow(n, 3)) + temp2 := t * (61 + 90*t*t + 45*math.Pow(t, 4)) * math.Pow(x, 6) / (720 * math.Pow(n, 5) * m) + + lat := (bf - temp0 + temp1 - temp2) * 57.29577951308232 + + temp0 = x / (n * math.Cos(bf)) + temp1 = (1 + 2*t*t + j2) * math.Pow(x, 3) / (6 * math.Pow(n, 3) * 
math.Cos(bf)) + temp2 = (5 + 28*t*t + 6*j2 + 24*math.Pow(t, 4) + 8*t*t*j2) * math.Pow(x, 5) / (120 * math.Pow(n, 5) * math.Cos(bf)) + + lng := (temp0-temp1+temp2)*57.29577951308232 + p.MeridianLine() + + return NewLocation(lng, lat) +} diff --git a/vendor/github.com/shenghui0779/yiigo/logger.go b/vendor/github.com/shenghui0779/yiigo/logger.go new file mode 100644 index 00000000..d206b9e1 --- /dev/null +++ b/vendor/github.com/shenghui0779/yiigo/logger.go @@ -0,0 +1,146 @@ +package yiigo + +import ( + "context" + "os" + "sync" + "time" + + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + "gopkg.in/natefinch/lumberjack.v2" +) + +// CtxLogger custom logger with context +type CtxLogger interface { + // Info logs a info message. + Info(ctx context.Context, msg string, fields ...zap.Field) + + // Warn logs a warning message. + Warn(ctx context.Context, msg string, fields ...zap.Field) + + // Err logs a error message. + Err(ctx context.Context, msg string, fields ...zap.Field) +} + +var ( + logger = debugLogger() + logMap sync.Map +) + +// LoggerConfig keeps the settings to configure logger. +type LoggerConfig struct { + // Filename is the file to write logs to. + Filename string `json:"filename"` + + // Options optional settings to configure logger. + Options *LoggerOptions `json:"options"` +} + +// LoggerOptions optional settings to configure logger. +type LoggerOptions struct { + // MaxSize is the maximum size in megabytes of the log file before it gets + // rotated. It defaults to 100 megabytes. + MaxSize int `json:"max_size"` + + // MaxAge is the maximum number of days to retain old log files based on the + // timestamp encoded in their filename. Note that a day is defined as 24 + // hours and may not exactly correspond to calendar days due to daylight + // savings, leap seconds, etc. The default is not to remove old log files + // based on age. + MaxAge int `json:"max_age"` + + // MaxBackups is the maximum number of old log files to retain. The default + // is to retain all old log files (though MaxAge may still cause them to get + // deleted.) + MaxBackups int `json:"max_backups"` + + // Compress determines if the rotated log files should be compressed + // using gzip. The default is not to perform compression. + Compress bool `json:"compress"` + + // Stderr specifies the stderr for logger + Stderr bool `json:"stderr"` + + // ZapOptions specifies the zap options stderr for logger + ZapOptions []zap.Option `json:"zap_options"` +} + +// newLogger returns a new logger. +func newLogger(cfg *LoggerConfig) *zap.Logger { + if len(cfg.Filename) == 0 { + return debugLogger(cfg.Options.ZapOptions...) + } + + c := zap.NewProductionEncoderConfig() + + c.TimeKey = "time" + c.EncodeTime = MyTimeEncoder + c.EncodeCaller = zapcore.FullCallerEncoder + + ws := make([]zapcore.WriteSyncer, 0, 2) + + ws = append(ws, zapcore.AddSync(&lumberjack.Logger{ + Filename: cfg.Filename, + MaxSize: cfg.Options.MaxSize, + MaxAge: cfg.Options.MaxAge, + MaxBackups: cfg.Options.MaxBackups, + Compress: cfg.Options.Compress, + LocalTime: true, + })) + + if cfg.Options.Stderr { + ws = append(ws, zapcore.Lock(os.Stderr)) + } + + core := zapcore.NewCore(zapcore.NewJSONEncoder(c), zapcore.NewMultiWriteSyncer(ws...), zap.DebugLevel) + + return zap.New(core, cfg.Options.ZapOptions...) 
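+
+	// A typical LoggerConfig for the file-backed branch above might look like
+	// (a sketch; values are illustrative):
+	//
+	//	&LoggerConfig{
+	//		Filename: "logs/app.log",
+	//		Options:  &LoggerOptions{MaxSize: 100, MaxAge: 7, Compress: true},
+	//	}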
+} + +func debugLogger(options ...zap.Option) *zap.Logger { + cfg := zap.NewDevelopmentConfig() + + cfg.DisableCaller = true + cfg.EncoderConfig.EncodeLevel = zapcore.CapitalColorLevelEncoder + cfg.EncoderConfig.EncodeTime = MyTimeEncoder + cfg.EncoderConfig.EncodeCaller = zapcore.FullCallerEncoder + + l, _ := cfg.Build(options...) + + return l +} + +func initLogger(name string, cfg *LoggerConfig) { + if cfg.Options == nil { + cfg.Options = new(LoggerOptions) + } + + l := newLogger(cfg) + + if name == Default { + logger = l + } + + logMap.Store(name, l) +} + +// Logger returns a logger +func Logger(name ...string) *zap.Logger { + if len(name) == 0 || name[0] == Default { + return logger + } + + v, ok := logMap.Load(name[0]) + + if !ok { + return logger + } + + return v.(*zap.Logger) +} + +// MyTimeEncoder zap time encoder. +func MyTimeEncoder(t time.Time, e zapcore.PrimitiveArrayEncoder) { + e.AppendString(t.In(timezone).Format(layouttime)) +} diff --git a/vendor/github.com/shenghui0779/yiigo/mongo.go b/vendor/github.com/shenghui0779/yiigo/mongo.go new file mode 100644 index 00000000..54f5e138 --- /dev/null +++ b/vendor/github.com/shenghui0779/yiigo/mongo.go @@ -0,0 +1,68 @@ +package yiigo + +import ( + "context" + "fmt" + "sync" + "time" + + "go.mongodb.org/mongo-driver/mongo" + "go.mongodb.org/mongo-driver/mongo/options" + "go.uber.org/zap" +) + +var ( + defaultMongo *mongo.Client + mgoMap sync.Map +) + +func initMongoDB(name, dsn string) { + opts := options.Client().ApplyURI(dsn) + + client, err := mongo.Connect(context.Background(), opts) + + if err != nil { + logger.Panic(fmt.Sprintf("[yiigo] err mongodb.%s connect", name), zap.String("dsn", dsn), zap.Error(err)) + } + + timeout := 10 * time.Second + + if opts.ConnectTimeout != nil { + timeout = *opts.ConnectTimeout + } + + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + + // verify connection + if err = client.Ping(ctx, opts.ReadPreference); err != nil { + logger.Panic(fmt.Sprintf("[yiigo] err mongodb.%s ping", name), zap.String("dsn", dsn), zap.Error(err)) + } + + if name == Default { + defaultMongo = client + } + + mgoMap.Store(name, client) + + logger.Info(fmt.Sprintf("[yiigo] mongodb.%s is OK", name)) +} + +// Mongo returns a mongo client. +func Mongo(name ...string) *mongo.Client { + if len(name) == 0 || name[0] == Default { + if defaultMongo == nil { + logger.Panic(fmt.Sprintf("[yiigo] unknown mongodb.%s (forgotten configure?)", Default)) + } + + return defaultMongo + } + + v, ok := mgoMap.Load(name[0]) + + if !ok { + logger.Panic(fmt.Sprintf("[yiigo] unknown mongodb.%s (forgotten configure?)", name[0])) + } + + return v.(*mongo.Client) +} diff --git a/vendor/github.com/shenghui0779/yiigo/mutex.go b/vendor/github.com/shenghui0779/yiigo/mutex.go new file mode 100644 index 00000000..e8a16054 --- /dev/null +++ b/vendor/github.com/shenghui0779/yiigo/mutex.go @@ -0,0 +1,134 @@ +package yiigo + +import ( + "context" + "runtime/debug" + "time" + + "github.com/gomodule/redigo/redis" + "go.uber.org/zap" +) + +// MutexHandler the function to execute after lock acquired. +type MutexHandler func(ctx context.Context) error + +// Mutex is a reader/writer mutual exclusion lock. +type Mutex interface { + // Acquire attempt to acquire lock at regular intervals. 
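+	// For example, a sketch of a distributed lock around a critical section
+	// (key and intervals are illustrative):
+	//
+	//	mutex := DistributedMutex("lock:order:10086", WithMutexExpire(10*time.Second))
+	//	err := mutex.Acquire(ctx, func(ctx context.Context) error {
+	//		// critical section
+	//		return nil
+	//	}, 100*time.Millisecond, 2*time.Second)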
+ Acquire(ctx context.Context, callback MutexHandler, interval, timeout time.Duration) error +} + +type distributed struct { + pool RedisPool + key string + expire int64 +} + +func (d *distributed) Acquire(ctx context.Context, callback MutexHandler, interval, timeout time.Duration) error { + mutexCtx := ctx + + if timeout > 0 { + var cancel context.CancelFunc + + mutexCtx, cancel = context.WithTimeout(mutexCtx, timeout) + defer cancel() + } + + conn, err := d.pool.Get(mutexCtx) + + if err != nil { + return err + } + + defer d.pool.Put(conn) + + ok, err := d.attempt(conn) + + if err != nil { + return err + } + + // if not ok, attempt regularly + if !ok { + ticker := time.NewTicker(interval) + defer ticker.Stop() + + for { + select { + case <-mutexCtx.Done(): + // timeout or canceled + return mutexCtx.Err() + case <-ticker.C: + ok, err = d.attempt(conn) + + if err != nil { + return err + } + } + + if ok { + break + } + } + } + + // release lock + defer func() { + defer conn.Do("DEL", d.key) + + if err := recover(); err != nil { + logger.Error("mutex callback panic", zap.Any("error", err), zap.ByteString("stack", debug.Stack())) + } + }() + + return callback(ctx) +} + +func (d *distributed) attempt(conn *RedisConn) (bool, error) { + // attempt to acquire lock with `setnx` + reply, err := redis.String(conn.Do("SET", d.key, time.Now().Nanosecond(), "EX", d.expire, "NX")) + + if err != nil && err != redis.ErrNil { + return false, err + } + + if reply == OK { + return true, nil + } + + return false, nil +} + +// MutexOption mutex option +type MutexOption func(d *distributed) + +// WithMutexRedis specifies redis pool for mutex. +func WithMutexRedis(name string) MutexOption { + return func(d *distributed) { + d.pool = Redis(name) + } +} + +// WithMutexExpire specifies expire seconds for mutex. +func WithMutexExpire(e time.Duration) MutexOption { + return func(d *distributed) { + if sec := int64(e.Seconds()); sec > 0 { + d.expire = sec + } + } +} + +// DistributedMutex returns a simple distributed mutual exclusion lock. +func DistributedMutex(key string, options ...MutexOption) Mutex { + mutex := &distributed{ + pool: defaultRedis, + key: key, + expire: 10, + } + + for _, f := range options { + f(mutex) + } + + return mutex +} diff --git a/vendor/github.com/shenghui0779/yiigo/nsq.go b/vendor/github.com/shenghui0779/yiigo/nsq.go new file mode 100644 index 00000000..c16ac101 --- /dev/null +++ b/vendor/github.com/shenghui0779/yiigo/nsq.go @@ -0,0 +1,155 @@ +package yiigo + +import ( + "time" + + "github.com/nsqio/go-nsq" + "go.uber.org/zap" +) + +var producer *nsq.Producer + +// NSQLogger NSQ logger +type NSQLogger struct{} + +// Output implements the NSQ logger interface +func (l *NSQLogger) Output(calldepth int, s string) error { + logger.Error(s, zap.Int("call_depth", calldepth)) + + return nil +} + +func initProducer(nsqd string) (err error) { + producer, err = nsq.NewProducer(nsqd, nsq.NewConfig()) + + if err != nil { + return + } + + producer.SetLogger(&NSQLogger{}, nsq.LogLevelError) + + return +} + +// NSQMessage NSQ message +type NSQMessage interface { + Bytes() ([]byte, error) + // Do message processing + Do() error +} + +// NSQPublish synchronously publishes a message body to the specified topic. 
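+// For example, a sketch with a type implementing NSQMessage
+// (topic and payload are illustrative; assumes encoding/json):
+//
+//	type OrderMsg struct {
+//		OrderID int64 `json:"order_id"`
+//	}
+//
+//	func (m *OrderMsg) Bytes() ([]byte, error) { return json.Marshal(m) }
+//	func (m *OrderMsg) Do() error              { return nil } // consumer-side handling
+//
+//	err := NSQPublish("order_topic", &OrderMsg{OrderID: 10086})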
+func NSQPublish(topic string, msg NSQMessage) error { + b, err := msg.Bytes() + + if err != nil { + return err + } + + return producer.Publish(topic, b) +} + +// NSQDeferredPublish synchronously publishes a message body to the specified topic +// where the message will queue at the channel level until the timeout expires. +func NSQDeferredPublish(topic string, msg NSQMessage, duration time.Duration) error { + b, err := msg.Bytes() + + if err != nil { + return err + } + + return producer.DeferredPublish(topic, duration, b) +} + +// NSQConsumer NSQ consumer +type NSQConsumer interface { + nsq.Handler + Topic() string + Channel() string + Attempts() uint16 + Config() *nsq.Config +} + +func setConsumers(lookupd []string, consumers ...NSQConsumer) error { + for _, c := range consumers { + cfg := c.Config() + + if cfg == nil { + cfg = nsq.NewConfig() + + cfg.LookupdPollInterval = time.Second + cfg.RDYRedistributeInterval = time.Second + cfg.MaxInFlight = 1000 + } + + // set attempt acount, default: 5 + if c.Attempts() > 0 { + if err := cfg.Set("max_attempts", c.Attempts()); err != nil { + return err + } + } + + nc, err := nsq.NewConsumer(c.Topic(), c.Channel(), cfg) + + if err != nil { + return err + } + + nc.SetLogger(&NSQLogger{}, nsq.LogLevelError) + nc.AddHandler(c) + + if err := nc.ConnectToNSQLookupds(lookupd); err != nil { + return err + } + } + + return nil +} + +func initNSQ(nsqd string, lookupd []string, consumers ...NSQConsumer) { + // init producer + if err := initProducer(nsqd); err != nil { + logger.Panic("[yiigo] err new producer", zap.Error(err)) + } + + // set consumers + if err := setConsumers(lookupd, consumers...); err != nil { + logger.Panic("[yiigo] err set consumer", zap.Error(err)) + } + + logger.Info("[yiigo] nsq is OK") +} + +// NextAttemptDelay returns the delay time for next attempt. +func NextAttemptDelay(attempts uint16) time.Duration { + var d time.Duration + + switch attempts { + case 0: + d = 5 * time.Second + case 1: + d = 10 * time.Second + case 2: + d = 15 * time.Second + case 3: + d = 30 * time.Second + case 4: + d = 1 * time.Minute + case 5: + d = 2 * time.Minute + case 6: + d = 5 * time.Minute + case 7: + d = 10 * time.Minute + case 8: + d = 15 * time.Minute + case 9: + d = 30 * time.Minute + case 10: + d = 1 * time.Hour + default: + d = 1 * time.Hour + } + + return d +} diff --git a/vendor/github.com/shenghui0779/yiigo/redis.go b/vendor/github.com/shenghui0779/yiigo/redis.go new file mode 100644 index 00000000..3c1d39d1 --- /dev/null +++ b/vendor/github.com/shenghui0779/yiigo/redis.go @@ -0,0 +1,317 @@ +package yiigo + +import ( + "context" + "crypto/tls" + "fmt" + "net" + "sync" + "time" + + "github.com/gomodule/redigo/redis" + "github.com/shenghui0779/vitess_pool" + "go.uber.org/zap" +) + +// RedisConn redis connection resource +type RedisConn struct { + redis.Conn +} + +// Close closes the connection resource +func (rc *RedisConn) Close() { + if err := rc.Conn.Close(); err != nil { + logger.Error("[yiigo] err conn closed", zap.Error(err)) + } +} + +// RedisPool redis pool resource +type RedisPool interface { + // Get returns a connection resource from the pool. + // Context with timeout can specify the wait timeout for pool. + Get(ctx context.Context) (*RedisConn, error) + + // Put returns a connection resource to the pool. + Put(rc *RedisConn) +} + +// RedisConfig keeps the settings to setup redis connection. +type RedisConfig struct { + // Addr host:port address. + Addr string `json:"addr"` + + // Options optional settings to setup redis connection. 
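+	// For example (a sketch; values are illustrative):
+	//
+	//	&RedisConfig{
+	//		Addr: "127.0.0.1:6379",
+	//		Options: &RedisOptions{
+	//			Password:    "secret",
+	//			PoolSize:    20,
+	//			IdleTimeout: 5 * time.Minute,
+	//		},
+	//	}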
+ Options *RedisOptions `json:"options"` +} + +// RedisOptions optional settings to setup redis connection. +type RedisOptions struct { + // Dialer is a custom dial function for creating TCP connections, + // otherwise a net.Dialer customized via the other options is used. + Dialer func(ctx context.Context, network, addr string) (net.Conn, error) `json:"dialer"` + + // Username to be used when connecting to the Redis server when Redis ACLs are used. + Username string `json:"username"` + + // Password to be used when connecting to the Redis server. + Password string `json:"password"` + + // Database to be selected when dialing a connection. + Database int `json:"database"` + + // ConnTimeout is the timeout for connecting to the Redis server. + // Use value -1 for no timeout and 0 for default. + // Default is 10 seconds. + ConnTimeout time.Duration `json:"conn_timeout"` + + // ReadTimeout is the timeout for reading a single command reply. + // Use value -1 for no timeout and 0 for default. + // Default is 10 seconds. + ReadTimeout time.Duration `json:"read_timeout"` + + // WriteTimeout is the timeout for writing a single command. + // Use value -1 for no timeout and 0 for default. + // Default is 10 seconds. + WriteTimeout time.Duration `json:"write_timeout"` + + // PoolSize is the maximum number of possible resources in the pool. + // Use value -1 for no timeout and 0 for default. + // Default is 10. + PoolSize int `json:"pool_size"` + + // PoolPrefill is the number of resources to be pre-filled in the pool. + // Default is no pre-filled. + PoolPrefill int `json:"pool_prefill"` + + // IdleTimeout is the amount of time after which client closes idle connections. + // Use value -1 for no timeout and 0 for default. + // Default is 5 minutes. + IdleTimeout time.Duration `json:"idle_timeout"` + + // TLSConfig to be used when a TLS connection is dialed. 
+ TLSConfig *tls.Config `json:"tls_config"` +} + +func (o *RedisOptions) rebuild(opt *RedisOptions) { + o.Dialer = opt.Dialer + o.TLSConfig = opt.TLSConfig + + if len(opt.Username) != 0 { + o.Username = opt.Username + } + + if len(opt.Password) != 0 { + o.Password = opt.Password + } + + if opt.Database > 0 { + o.Database = opt.Database + } + + if opt.ConnTimeout > 0 { + o.ConnTimeout = opt.ConnTimeout + } else { + if opt.ConnTimeout == -1 { + o.ConnTimeout = 0 + } + } + + if opt.ReadTimeout > 0 { + o.ReadTimeout = opt.ReadTimeout + } else { + if opt.ReadTimeout == -1 { + o.ReadTimeout = 0 + } + } + + if opt.WriteTimeout > 0 { + o.WriteTimeout = opt.WriteTimeout + } else { + if opt.WriteTimeout == -1 { + o.WriteTimeout = 0 + } + } + + if opt.PoolSize > 0 { + o.PoolSize = opt.PoolSize + } + + if opt.PoolPrefill > 0 { + o.PoolPrefill = opt.PoolPrefill + } + + if opt.IdleTimeout > 0 { + o.IdleTimeout = opt.IdleTimeout + } else { + if opt.IdleTimeout == -1 { + o.IdleTimeout = 0 + } + } +} + +type redisResourcePool struct { + config *RedisConfig + pool *vitess_pool.ResourcePool + mutex sync.Mutex +} + +func (rp *redisResourcePool) dial() (redis.Conn, error) { + dialOptions := []redis.DialOption{ + redis.DialDatabase(rp.config.Options.Database), + redis.DialConnectTimeout(rp.config.Options.ConnTimeout), + redis.DialReadTimeout(rp.config.Options.ReadTimeout), + redis.DialWriteTimeout(rp.config.Options.WriteTimeout), + } + + if len(rp.config.Options.Username) != 0 { + dialOptions = append(dialOptions, redis.DialUsername(rp.config.Options.Username)) + } + + if len(rp.config.Options.Password) != 0 { + dialOptions = append(dialOptions, redis.DialPassword(rp.config.Options.Password)) + } + + if rp.config.Options.Dialer != nil { + dialOptions = append(dialOptions, redis.DialContextFunc(rp.config.Options.Dialer)) + } + + if rp.config.Options.TLSConfig != nil { + dialOptions = append(dialOptions, redis.DialTLSConfig(rp.config.Options.TLSConfig)) + } + + conn, err := redis.Dial("tcp", rp.config.Addr, dialOptions...) 
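+	// redigo applies the options above during the handshake
+	// (TLS, AUTH with username/password, SELECT of the database),
+	// so a failure in any of those steps is reported here by Dial.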
+ + return conn, err +} + +func (rp *redisResourcePool) init() { + rp.mutex.Lock() + defer rp.mutex.Unlock() + + if rp.pool != nil && !rp.pool.IsClosed() { + return + } + + df := func() (vitess_pool.Resource, error) { + conn, err := rp.dial() + + if err != nil { + return nil, err + } + + return &RedisConn{conn}, nil + } + + rp.pool = vitess_pool.NewResourcePool(df, rp.config.Options.PoolSize, rp.config.Options.PoolSize, rp.config.Options.IdleTimeout, rp.config.Options.PoolPrefill) +} + +func (rp *redisResourcePool) Get(ctx context.Context) (*RedisConn, error) { + if rp.pool.IsClosed() { + rp.init() + } + + resource, err := rp.pool.Get(ctx) + + if err != nil { + return nil, err + } + + rc := resource.(*RedisConn) + + // If rc is error, close and reconnect + if err = rc.Err(); err != nil { + logger.Warn("[yiigo] err pool conn, reconnect", zap.Error(err)) + + conn, dialErr := rp.dial() + + if dialErr != nil { + rp.pool.Put(rc) + + return nil, dialErr + } + + rc.Close() + + return &RedisConn{conn}, nil + } + + return rc, nil +} + +func (rp *redisResourcePool) Put(conn *RedisConn) { + rp.pool.Put(conn) +} + +var ( + defaultRedis RedisPool + redisMap sync.Map +) + +func newRedisPool(cfg *RedisConfig) RedisPool { + pool := &redisResourcePool{ + config: &RedisConfig{ + Addr: cfg.Addr, + Options: &RedisOptions{ + ConnTimeout: 10 * time.Second, + ReadTimeout: 10 * time.Second, + WriteTimeout: 10 * time.Second, + PoolSize: 10, + IdleTimeout: 5 * time.Minute, + }, + }, + } + + if cfg.Options != nil { + pool.config.Options.rebuild(cfg.Options) + } + + pool.init() + + return pool +} + +func initRedis(name string, cfg *RedisConfig) { + pool := newRedisPool(cfg) + + // verify connection + conn, err := pool.Get(context.TODO()) + + if err != nil { + logger.Panic(fmt.Sprintf("[yiigo] err redis.%s pool", name), zap.String("addr", cfg.Addr), zap.Error(err)) + } + + if _, err = conn.Do("PING"); err != nil { + conn.Close() + + logger.Panic(fmt.Sprintf("[yiigo] err redis.%s ping", name), zap.String("addr", cfg.Addr), zap.Error(err)) + } + + pool.Put(conn) + + if name == Default { + defaultRedis = pool + } + + redisMap.Store(name, pool) + + logger.Info(fmt.Sprintf("[yiigo] redis.%s is OK", name)) +} + +// Redis returns a redis pool. +func Redis(name ...string) RedisPool { + if len(name) == 0 || name[0] == Default { + if defaultRedis == nil { + logger.Panic(fmt.Sprintf("[yiigo] unknown redis.%s (forgotten configure?)", Default)) + } + + return defaultRedis + } + + v, ok := redisMap.Load(name[0]) + + if !ok { + logger.Panic(fmt.Sprintf("[yiigo] unknown redis.%s (forgotten configure?)", name[0])) + } + + return v.(RedisPool) +} diff --git a/vendor/github.com/shenghui0779/yiigo/sql_builder.go b/vendor/github.com/shenghui0779/yiigo/sql_builder.go new file mode 100644 index 00000000..927f1eeb --- /dev/null +++ b/vendor/github.com/shenghui0779/yiigo/sql_builder.go @@ -0,0 +1,1016 @@ +package yiigo + +import ( + "context" + "errors" + "fmt" + "reflect" + "strings" + + "github.com/jmoiron/sqlx" + "go.uber.org/zap" +) + +var ( + // ErrInvalidUpsertData invalid insert or update data. + ErrInvalidUpsertData = errors.New("invaild data, expects struct, *struct, yiigo.X") + // ErrInvalidBatchInsertData invalid batch insert data. + ErrInvalidBatchInsertData = errors.New("invaild data, expects []struct, []*struct, []yiigo.X") +) + +// SQLBuilder is the interface for wrapping query options. 
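+// For example, a minimal query sketch (table and condition are illustrative;
+// assumes a context.Context in scope):
+//
+//	builder := NewMySQLBuilder()
+//	query, binds := builder.Wrap(
+//		Table("user"),
+//		Where("age > ?", 20),
+//	).ToQuery(ctx)
+//	// query: SELECT * FROM user WHERE age > ?
+//	// binds: [20]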
+type SQLBuilder interface { + // Wrap wrapping query options + Wrap(options ...QueryOption) SQLWrapper +} + +// SQLWrapper is the interface for building sql statement. +type SQLWrapper interface { + // ToQuery returns query statement and binds. + ToQuery(ctx context.Context) (string, []interface{}) + + // ToInsert returns insert statement and binds. + // data expects `struct`, `*struct`, `yiigo.X`. + ToInsert(ctx context.Context, data interface{}) (string, []interface{}) + + // ToBatchInsert returns batch insert statement and binds. + // data expects `[]struct`, `[]*struct`, `[]yiigo.X`. + ToBatchInsert(ctx context.Context, data interface{}) (string, []interface{}) + + // ToUpdate returns update statement and binds. + // data expects `struct`, `*struct`, `yiigo.X`. + ToUpdate(ctx context.Context, data interface{}) (string, []interface{}) + + // ToDelete returns delete statement and binds. + ToDelete(ctx context.Context) (string, []interface{}) + + // ToTruncate returns truncate statement + ToTruncate(ctx context.Context) string +} + +type sqlLogger struct{} + +func (l *sqlLogger) Info(ctx context.Context, msg string, fields ...zap.Field) { + logger.Info(fmt.Sprintf("[SQL] %s", msg), fields...) +} + +func (l *sqlLogger) Warn(ctx context.Context, msg string, fields ...zap.Field) {} + +func (l *sqlLogger) Err(ctx context.Context, msg string, fields ...zap.Field) { + logger.Error(fmt.Sprintf("[SQL] %s", msg), fields...) +} + +type queryBuilder struct { + driver DBDriver + logger CtxLogger + debug bool +} + +func (b *queryBuilder) Wrap(options ...QueryOption) SQLWrapper { + wrapper := &queryWrapper{ + builder: b, + columns: []string{"*"}, + } + + for _, f := range options { + f(wrapper) + } + + return wrapper +} + +// NewSQLBuilder returns new SQLBuilder +func NewSQLBuilder(driver DBDriver, options ...BuilderOption) SQLBuilder { + builder := &queryBuilder{ + driver: driver, + logger: new(sqlLogger), + } + + for _, f := range options { + f(builder) + } + + return builder +} + +// NewMySQLBuilder returns new SQLBuilder for MySQL +func NewMySQLBuilder(options ...BuilderOption) SQLBuilder { + return NewSQLBuilder(MySQL, options...) +} + +// NewPGSQLBuilder returns new SQLBuilder for Postgres +func NewPGSQLBuilder(options ...BuilderOption) SQLBuilder { + return NewSQLBuilder(Postgres, options...) +} + +// NewSQLiteBuilder returns new SQLBuilder for SQLite +func NewSQLiteBuilder(options ...BuilderOption) SQLBuilder { + return NewSQLBuilder(SQLite, options...) +} + +// SQLClause SQL clause +type SQLClause struct { + table string + keyword string + query string + binds []interface{} +} + +// Clause returns sql clause, eg: yiigo.Clause("price * ? + ?", 2, 100). 
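+// For example, a sketch of an update expression built from a clause
+// (table and values are illustrative):
+//
+//	query, binds := builder.Wrap(
+//		Table("product"),
+//		Where("id = ?", 1),
+//	).ToUpdate(ctx, X{"price": Clause("price * ? + ?", 2, 100)})
+//	// query: UPDATE product SET price = price * ? + ? WHERE id = ?
+//	// binds: [2 100 1]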
+func Clause(query string, binds ...interface{}) *SQLClause { + return &SQLClause{ + query: query, + binds: binds, + } +} + +type queryWrapper struct { + builder *queryBuilder + table string + columns []string + where *SQLClause + joins []*SQLClause + groups []string + having *SQLClause + orders []string + offset int + limit int + unions []*SQLClause + distinct bool + whereIn bool +} + +func (w *queryWrapper) ToQuery(ctx context.Context) (string, []interface{}) { + query, binds := w.subquery() + + // unions + if l := len(w.unions); l != 0 { + var builder strings.Builder + + builder.WriteString("(") + builder.WriteString(query) + builder.WriteString(")") + + for _, v := range w.unions { + builder.WriteString(" ") + builder.WriteString(v.keyword) + builder.WriteString(" (") + builder.WriteString(v.query) + builder.WriteString(")") + + binds = append(binds, v.binds...) + } + + query = builder.String() + } + + // where in + if w.whereIn { + var err error + + query, binds, err = sqlx.In(query, binds...) + + if err != nil { + w.builder.logger.Err(ctx, "err build query", zap.Error(err)) + + return "", nil + } + } + + query = sqlx.Rebind(sqlx.BindType(string(w.builder.driver)), query) + + if w.builder.debug { + w.builder.logger.Info(ctx, query, zap.Any("args", binds)) + } + + return query, binds +} + +func (w *queryWrapper) subquery() (string, []interface{}) { + binds := make([]interface{}, 0) + + var builder strings.Builder + + builder.WriteString("SELECT ") + + if w.distinct { + builder.WriteString("DISTINCT ") + } + + builder.WriteString(w.columns[0]) + + for _, column := range w.columns[1:] { + builder.WriteString(", ") + builder.WriteString(column) + } + + builder.WriteString(" FROM ") + builder.WriteString(w.table) + + if len(w.joins) != 0 { + for _, join := range w.joins { + builder.WriteString(" ") + builder.WriteString(join.keyword) + builder.WriteString(" JOIN ") + builder.WriteString(join.table) + + if len(join.query) != 0 { + builder.WriteString(" ON ") + builder.WriteString(join.query) + } + } + } + + if w.where != nil { + builder.WriteString(" WHERE ") + builder.WriteString(w.where.query) + + binds = append(binds, w.where.binds...) + } + + if len(w.groups) != 0 { + builder.WriteString(" GROUP BY ") + builder.WriteString(w.groups[0]) + + for _, column := range w.groups[1:] { + builder.WriteString(", ") + builder.WriteString(column) + } + } + + if w.having != nil { + builder.WriteString(" HAVING ") + builder.WriteString(w.having.query) + + binds = append(binds, w.having.binds...) 
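+		// binds accumulate in clause order (WHERE, then HAVING, then
+		// LIMIT/OFFSET below), matching placeholder order in the statement.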
+ } + + if len(w.orders) != 0 { + builder.WriteString(" ORDER BY ") + builder.WriteString(w.orders[0]) + + for _, column := range w.orders[1:] { + builder.WriteString(", ") + builder.WriteString(column) + } + } + + if w.limit != 0 { + builder.WriteString(" LIMIT ?") + binds = append(binds, w.limit) + } + + if w.offset != 0 { + builder.WriteString(" OFFSET ?") + binds = append(binds, w.offset) + } + + return builder.String(), binds +} + +func (w *queryWrapper) ToInsert(ctx context.Context, data interface{}) (string, []interface{}) { + var ( + columns []string + binds []interface{} + ) + + v := reflect.Indirect(reflect.ValueOf(data)) + + switch v.Kind() { + case reflect.Map: + x, ok := data.(X) + + if !ok { + w.builder.logger.Err(ctx, "err build insert", zap.Error(ErrInvalidUpsertData)) + + return "", nil + } + + columns, binds = w.insertWithMap(x) + case reflect.Struct: + columns, binds = w.insertWithStruct(v) + default: + w.builder.logger.Err(ctx, "err build insert", zap.Error(ErrInvalidUpsertData)) + + return "", nil + } + + columnLen := len(columns) + + if columnLen == 0 { + return "", nil + } + + var builder strings.Builder + + builder.WriteString("INSERT INTO ") + builder.WriteString(w.table) + builder.WriteString(" (") + builder.WriteString(columns[0]) + + for _, column := range columns[1:] { + builder.WriteString(", ") + builder.WriteString(column) + } + + builder.WriteString(") VALUES (?") + + for i := 1; i < columnLen; i++ { + builder.WriteString(", ?") + } + + builder.WriteString(")") + + if w.builder.driver == Postgres { + builder.WriteString(" RETURNING id") + } + + query := sqlx.Rebind(sqlx.BindType(string(w.builder.driver)), builder.String()) + + if w.builder.debug { + w.builder.logger.Info(ctx, query, zap.Any("args", binds)) + } + + return query, binds +} + +func (w *queryWrapper) insertWithMap(data X) (columns []string, binds []interface{}) { + fieldNum := len(data) + + columns = make([]string, 0, fieldNum) + binds = make([]interface{}, 0, fieldNum) + + for k, v := range data { + columns = append(columns, k) + binds = append(binds, v) + } + + return +} + +func (w *queryWrapper) insertWithStruct(v reflect.Value) (columns []string, binds []interface{}) { + fieldNum := v.NumField() + + columns = make([]string, 0, fieldNum) + binds = make([]interface{}, 0, fieldNum) + + t := v.Type() + + for i := 0; i < fieldNum; i++ { + fieldT := t.Field(i) + tag := fieldT.Tag.Get("db") + + if tag == "-" { + continue + } + + fieldV := v.Field(i) + column := fieldT.Name + + if len(tag) != 0 { + name, opts := parseTag(tag) + + if opts.Contains("omitempty") && isEmptyValue(fieldV) { + continue + } + + column = name + } + + columns = append(columns, column) + binds = append(binds, fieldV.Interface()) + } + + return +} + +func (w *queryWrapper) ToBatchInsert(ctx context.Context, data interface{}) (string, []interface{}) { + v := reflect.Indirect(reflect.ValueOf(data)) + + if v.Kind() != reflect.Slice { + w.builder.logger.Err(ctx, "err build batch insert", zap.Error(ErrInvalidBatchInsertData)) + + return "", nil + } + + if v.Len() == 0 { + w.builder.logger.Err(ctx, "err build batch insert", zap.Error(errors.New("err empty data"))) + + return "", nil + } + + var ( + columns []string + binds []interface{} + ) + + e := v.Type().Elem() + + switch e.Kind() { + case reflect.Map: + x, ok := data.([]X) + + if !ok { + w.builder.logger.Err(ctx, "err build batch insert", zap.Error(ErrInvalidBatchInsertData)) + + return "", nil + } + + columns, binds = w.batchInsertWithMap(x) + case reflect.Struct: + columns, 
binds = w.batchInsertWithStruct(v) + case reflect.Ptr: + if e.Elem().Kind() != reflect.Struct { + w.builder.logger.Err(ctx, "err build batch insert", zap.Error(ErrInvalidBatchInsertData)) + + return "", nil + } + + columns, binds = w.batchInsertWithStruct(v) + default: + w.builder.logger.Err(ctx, "err build batch insert", zap.Error(ErrInvalidBatchInsertData)) + + return "", nil + } + + columnLen := len(columns) + + if columnLen == 0 { + return "", nil + } + + var builder strings.Builder + + builder.WriteString("INSERT INTO ") + builder.WriteString(w.table) + builder.WriteString(" (") + builder.WriteString(columns[0]) + + for _, column := range columns[1:] { + builder.WriteString(", ") + builder.WriteString(column) + } + + builder.WriteString(") VALUES (?") + + // 首行 + for i := 1; i < columnLen; i++ { + builder.WriteString(", ?") + } + + builder.WriteString(")") + + rows := len(binds) / columnLen + + // 其余行 + for i := 1; i < rows; i++ { + builder.WriteString(", (?") + + for j := 1; j < columnLen; j++ { + builder.WriteString(", ?") + } + + builder.WriteString(")") + } + + query := sqlx.Rebind(sqlx.BindType(string(w.builder.driver)), builder.String()) + + if w.builder.debug { + w.builder.logger.Info(ctx, query, zap.Any("args", binds)) + } + + return query, binds +} + +func (w *queryWrapper) batchInsertWithMap(data []X) (columns []string, binds []interface{}) { + dataLen := len(data) + fieldNum := len(data[0]) + + columns = make([]string, 0, fieldNum) + binds = make([]interface{}, 0, fieldNum*dataLen) + + for k := range data[0] { + columns = append(columns, k) + } + + for _, x := range data { + for _, v := range columns { + binds = append(binds, x[v]) + } + } + + return +} + +func (w *queryWrapper) batchInsertWithStruct(v reflect.Value) (columns []string, binds []interface{}) { + first := reflect.Indirect(v.Index(0)) + + dataLen := v.Len() + fieldNum := first.NumField() + + columns = make([]string, 0, fieldNum) + binds = make([]interface{}, 0, fieldNum*dataLen) + + t := first.Type() + + for i := 0; i < dataLen; i++ { + for j := 0; j < fieldNum; j++ { + fieldT := t.Field(j) + + tag := fieldT.Tag.Get("db") + + if tag == "-" { + continue + } + + fieldV := reflect.Indirect(v.Index(i)).Field(j) + column := fieldT.Name + + if len(tag) != 0 { + name, opts := parseTag(tag) + + if opts.Contains("omitempty") && isEmptyValue(fieldV) { + continue + } + + column = name + } + + if i == 0 { + columns = append(columns, column) + } + + binds = append(binds, fieldV.Interface()) + } + } + + return +} + +func (w *queryWrapper) ToUpdate(ctx context.Context, data interface{}) (string, []interface{}) { + var ( + columns []string + exprs map[string]string + binds []interface{} + ) + + v := reflect.Indirect(reflect.ValueOf(data)) + + switch v.Kind() { + case reflect.Map: + x, ok := data.(X) + + if !ok { + w.builder.logger.Err(ctx, "err build update", zap.Error(ErrInvalidUpsertData)) + + return "", nil + } + + columns, exprs, binds = w.updateWithMap(x) + case reflect.Struct: + columns, binds = w.updateWithStruct(v) + default: + w.builder.logger.Err(ctx, "err build update", zap.Error(ErrInvalidUpsertData)) + + return "", nil + } + + if len(columns) == 0 { + return "", nil + } + + var builder strings.Builder + + builder.WriteString("UPDATE ") + builder.WriteString(w.table) + builder.WriteString(" SET ") + builder.WriteString(columns[0]) + + if expr, ok := exprs[columns[0]]; ok { + builder.WriteString(" = ") + builder.WriteString(expr) + } else { + builder.WriteString(" = ?") + } + + for _, column := range columns[1:] { + 
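+		// remaining columns: plain values render as "col = ?", while
+		// *SQLClause values render their raw expression captured in exprs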
builder.WriteString(", ") + builder.WriteString(column) + + if expr, ok := exprs[column]; ok { + builder.WriteString(" = ") + builder.WriteString(expr) + } else { + builder.WriteString(" = ?") + } + } + + if w.where != nil { + builder.WriteString(" WHERE ") + builder.WriteString(w.where.query) + + binds = append(binds, w.where.binds...) + } + + query := builder.String() + + if w.whereIn { + var err error + + query, binds, err = sqlx.In(query, binds...) + + if err != nil { + w.builder.logger.Err(ctx, "err build update", zap.Error(err)) + + return "", nil + } + } + + query = sqlx.Rebind(sqlx.BindType(string(w.builder.driver)), query) + + if w.builder.debug { + w.builder.logger.Info(ctx, query, zap.Any("args", binds)) + } + + return query, binds +} + +func (w *queryWrapper) updateWithMap(data X) (columns []string, exprs map[string]string, binds []interface{}) { + fieldNum := len(data) + + columns = make([]string, 0, fieldNum) + exprs = make(map[string]string) + binds = make([]interface{}, 0, fieldNum) + + for k, v := range data { + columns = append(columns, k) + + if clause, ok := v.(*SQLClause); ok { + exprs[k] = clause.query + binds = append(binds, clause.binds...) + + continue + } + + binds = append(binds, v) + } + + return +} + +func (w *queryWrapper) updateWithStruct(v reflect.Value) (columns []string, binds []interface{}) { + fieldNum := v.NumField() + + columns = make([]string, 0, fieldNum) + binds = make([]interface{}, 0, fieldNum) + + t := v.Type() + + for i := 0; i < fieldNum; i++ { + fieldT := t.Field(i) + tag := fieldT.Tag.Get("db") + + if tag == "-" { + continue + } + + fieldV := v.Field(i) + column := fieldT.Name + + if len(tag) != 0 { + name, opts := parseTag(tag) + + if opts.Contains("omitempty") && isEmptyValue(fieldV) { + continue + } + + column = name + } + + columns = append(columns, column) + binds = append(binds, fieldV.Interface()) + } + + return +} + +func (w *queryWrapper) ToDelete(ctx context.Context) (string, []interface{}) { + binds := make([]interface{}, 0) + + var builder strings.Builder + + builder.WriteString("DELETE FROM ") + builder.WriteString(w.table) + + if w.where != nil { + builder.WriteString(" WHERE ") + builder.WriteString(w.where.query) + + binds = append(binds, w.where.binds...) + } + + query := builder.String() + + if w.whereIn { + var err error + + query, binds, err = sqlx.In(query, binds...) + + if err != nil { + w.builder.logger.Err(ctx, "err build delete", zap.Error(err)) + + return "", nil + } + } + + query = sqlx.Rebind(sqlx.BindType(string(w.builder.driver)), query) + + if w.builder.debug { + w.builder.logger.Info(ctx, query, zap.Any("args", binds)) + } + + return query, binds +} + +func (w *queryWrapper) ToTruncate(ctx context.Context) string { + var builder strings.Builder + + builder.WriteString("TRUNCATE ") + builder.WriteString(w.table) + + query := builder.String() + + if w.builder.debug { + w.builder.logger.Info(ctx, query) + } + + return query +} + +// BuilderOption configures how we set up the SQL builder. +type BuilderOption func(builder *queryBuilder) + +// WithSQLLogger specifies logger for SQL builder. +func WithSQLLogger(l CtxLogger) BuilderOption { + return func(builder *queryBuilder) { + builder.logger = l + } +} + +// WithSQLDebug specifies debug mode for SQL builder. +func WithSQLDebug() BuilderOption { + return func(builder *queryBuilder) { + builder.debug = true + } +} + +// QueryOption configures how we set up the SQL query statement. +type QueryOption func(w *queryWrapper) + +// Table specifies the query table. 
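+// For example, combined with WhereIn (a sketch; sqlx.In expands the slice
+// into one placeholder per element):
+//
+//	query, binds := builder.Wrap(
+//		Table("user"),
+//		WhereIn("id IN (?)", []int{1, 2, 3}),
+//	).ToQuery(ctx)
+//	// query: SELECT * FROM user WHERE id IN (?, ?, ?)
+//	// binds: [1 2 3]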
+func Table(name string) QueryOption { + return func(w *queryWrapper) { + w.table = name + } +} + +// Select specifies the query columns. +func Select(columns ...string) QueryOption { + return func(w *queryWrapper) { + w.columns = columns + } +} + +// Distinct specifies the `distinct` clause. +func Distinct(columns ...string) QueryOption { + return func(w *queryWrapper) { + w.columns = columns + w.distinct = true + } +} + +// Join specifies the `inner join` clause. +func Join(table, on string) QueryOption { + return func(w *queryWrapper) { + w.joins = append(w.joins, &SQLClause{ + table: table, + keyword: "INNER", + query: on, + }) + } +} + +// LeftJoin specifies the `left join` clause. +func LeftJoin(table, on string) QueryOption { + return func(w *queryWrapper) { + w.joins = append(w.joins, &SQLClause{ + table: table, + keyword: "LEFT", + query: on, + }) + } +} + +// RightJoin specifies the `right join` clause. +func RightJoin(table, on string) QueryOption { + return func(w *queryWrapper) { + w.joins = append(w.joins, &SQLClause{ + table: table, + keyword: "RIGHT", + query: on, + }) + } +} + +// FullJoin specifies the `full join` clause. +func FullJoin(table, on string) QueryOption { + return func(w *queryWrapper) { + w.joins = append(w.joins, &SQLClause{ + table: table, + keyword: "FULL", + query: on, + }) + } +} + +// CrossJoin specifies the `cross join` clause. +func CrossJoin(table string) QueryOption { + return func(w *queryWrapper) { + w.joins = append(w.joins, &SQLClause{ + table: table, + keyword: "CROSS", + }) + } +} + +// Where specifies the `where` clause. +func Where(query string, binds ...interface{}) QueryOption { + return func(w *queryWrapper) { + w.where = &SQLClause{ + query: query, + binds: binds, + } + } +} + +// WhereIn specifies the `where in` clause. +func WhereIn(query string, binds ...interface{}) QueryOption { + return func(w *queryWrapper) { + w.where = &SQLClause{ + query: query, + binds: binds, + } + + w.whereIn = true + } +} + +// GroupBy specifies the `group by` clause. +func GroupBy(columns ...string) QueryOption { + return func(w *queryWrapper) { + w.groups = columns + } +} + +// Having specifies the `having` clause. +func Having(query string, binds ...interface{}) QueryOption { + return func(w *queryWrapper) { + w.having = &SQLClause{ + query: query, + binds: binds, + } + } +} + +// OrderBy specifies the `order by` clause. +func OrderBy(columns ...string) QueryOption { + return func(w *queryWrapper) { + w.orders = columns + } +} + +// Offset specifies the `offset` clause. +func Offset(n int) QueryOption { + return func(w *queryWrapper) { + w.offset = n + } +} + +// Limit specifies the `limit` clause. +func Limit(n int) QueryOption { + return func(w *queryWrapper) { + w.limit = n + } +} + +// Union specifies the `union` clause. +func Union(wrappers ...SQLWrapper) QueryOption { + return func(w *queryWrapper) { + for _, wrapper := range wrappers { + v, ok := wrapper.(*queryWrapper) + + if !ok { + continue + } + + if v.whereIn { + w.whereIn = true + } + + query, binds := v.subquery() + + w.unions = append(w.unions, &SQLClause{ + keyword: "UNION", + query: query, + binds: binds, + }) + } + } +} + +// UnionAll specifies the `union all` clause. 
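+// For example, a sketch that unions two sharded tables
+// (table names are illustrative):
+//
+//	query, binds := builder.Wrap(
+//		Table("user_0"),
+//		Where("age > ?", 20),
+//		UnionAll(builder.Wrap(Table("user_1"), Where("age > ?", 20))),
+//	).ToQuery(ctx)
+//	// query: (SELECT * FROM user_0 WHERE age > ?) UNION ALL (SELECT * FROM user_1 WHERE age > ?)
+//	// binds: [20 20]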
+func UnionAll(wrappers ...SQLWrapper) QueryOption { + return func(w *queryWrapper) { + for _, wrapper := range wrappers { + v, ok := wrapper.(*queryWrapper) + + if !ok { + continue + } + + if v.whereIn { + w.whereIn = true + } + + query, binds := v.subquery() + + w.unions = append(w.unions, &SQLClause{ + keyword: "UNION ALL", + query: query, + binds: binds, + }) + } + } +} + +// tagOptions is the string following a comma in a struct field's "json" +// tag, or the empty string. It does not include the leading comma. +type tagOptions string + +// Contains reports whether a comma-separated list of options +// contains a particular substr flag. substr must be surrounded by a +// string boundary or commas. +func (o tagOptions) Contains(optionName string) bool { + if len(o) == 0 { + return false + } + + s := string(o) + + for len(s) != 0 { + var next string + + i := strings.Index(s, ",") + + if i >= 0 { + s, next = s[:i], s[i+1:] + } + + if s == optionName { + return true + } + + s = next + } + + return false +} + +// parseTag splits a struct field's json tag into its name and +// comma-separated options. +func parseTag(tag string) (string, tagOptions) { + if idx := strings.Index(tag, ","); idx != -1 { + return tag[:idx], tagOptions(tag[idx+1:]) + } + + return tag, tagOptions("") +} + +func isEmptyValue(v reflect.Value) bool { + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + } + + return false +} diff --git a/vendor/github.com/shenghui0779/yiigo/ssh.go b/vendor/github.com/shenghui0779/yiigo/ssh.go new file mode 100644 index 00000000..fb70a9ec --- /dev/null +++ b/vendor/github.com/shenghui0779/yiigo/ssh.go @@ -0,0 +1,82 @@ +package yiigo + +import ( + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "encoding/pem" + "errors" + + "golang.org/x/crypto/ssh" +) + +type SSHKey struct { + IDRSA []byte + IDRSAPub []byte + Fingerprint string +} + +// GenerateSSHKey returns ssh id_rsa and id_rsa.pub. +// Note: id_rsa.pub ends with `\n` +func GenerateSSHKey() (*SSHKey, error) { + prvKey, err := rsa.GenerateKey(rand.Reader, 2048) + + if err != nil { + return nil, err + } + + key := new(SSHKey) + + key.IDRSA = pem.EncodeToMemory(&pem.Block{ + Type: "RSA PRIVATE KEY", + Bytes: x509.MarshalPKCS1PrivateKey(prvKey), + }) + + pubKey, err := ssh.NewPublicKey(prvKey.Public()) + + if err != nil { + return nil, err + } + + key.IDRSAPub = ssh.MarshalAuthorizedKey(pubKey) + key.Fingerprint = MD5(string(pubKey.Marshal())) + + return key, nil +} + +// RSAPemToSSH converts rsa public key from pem to ssh-rsa. 
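+// For example (a sketch, error handling elided), a PKIX "PUBLIC KEY" PEM
+// block yields an authorized_keys-style "ssh-rsa AAAA..." line plus an MD5
+// fingerprint:
+//
+//	sshRSA, fingerprint, err := RSAPemToSSH(pemBytes)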
+// Note: value ends with `\n` +func RSAPemToSSH(pemPubKey []byte) (sshRSA []byte, fingerprint string, err error) { + block, _ := pem.Decode(pemPubKey) + + if block == nil { + err = errors.New("invalid rsa public key") + + return + } + + pubKey, err := x509.ParsePKIXPublicKey(block.Bytes) + + if err != nil { + return + } + + rsaKey, ok := pubKey.(*rsa.PublicKey) + + if !ok { + err = errors.New("invalid rsa public key") + + return + } + + sshKey, err := ssh.NewPublicKey(rsaKey) + + if err != nil { + return + } + + sshRSA = ssh.MarshalAuthorizedKey(sshKey) + fingerprint = MD5(string(sshKey.Marshal())) + + return +} diff --git a/vendor/github.com/shenghui0779/yiigo/timingwheel.go b/vendor/github.com/shenghui0779/yiigo/timingwheel.go new file mode 100644 index 00000000..c11706f8 --- /dev/null +++ b/vendor/github.com/shenghui0779/yiigo/timingwheel.go @@ -0,0 +1,275 @@ +package yiigo + +import ( + "context" + "errors" + "fmt" + "runtime/debug" + "sync" + "time" + + "go.uber.org/zap" +) + +// TWHandler the function to execute when task expired. +type TWHandler func(ctx context.Context, taskID string) error + +// TWDelay the function returns the delay time for the next execution of task. +type TWDelay func(attempts uint16) time.Duration + +// TWTask timing wheel task. +type TWTask struct { + ctx context.Context + round int + addedAt time.Time + remainder time.Duration + callback TWHandler + maxAttempts uint16 + attempts uint16 + delayFunc TWDelay +} + +// TimingWheel a simple single timing wheel. +type TimingWheel interface { + // AddOnceTask adds a task which will be executed only once when expired. + AddOnceTask(ctx context.Context, taskID string, callback TWHandler, delay time.Duration) + + // AddRetryTask adds a task which will be executed when expired, and if an error is returned, it will be retried multiple times. + AddRetryTask(ctx context.Context, taskID string, callback TWHandler, attempts uint16, delay TWDelay) + + // Stop stops the timing wheel. 
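+	// Stopping is terminal: the scheduler goroutine exits and any subsequent
+	// AddOnceTask/AddRetryTask call is rejected (see requeue).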
+	Stop()
+}
+
+type timewheel struct {
+	slot   int
+	tick   time.Duration
+	size   int
+	bucket []sync.Map
+	stop   chan struct{}
+	ctxNew func(ctx context.Context) context.Context
+	logger CtxLogger
+	debug  bool
+}
+
+func (tw *timewheel) AddOnceTask(ctx context.Context, taskID string, callback TWHandler, delay time.Duration) {
+	task := &TWTask{
+		ctx:         ctx,
+		callback:    callback,
+		maxAttempts: 1,
+		delayFunc: func(attempts uint16) time.Duration {
+			return delay
+		},
+	}
+
+	tw.requeue(ctx, taskID, task)
+}
+
+func (tw *timewheel) AddRetryTask(ctx context.Context, taskID string, callback TWHandler, attempts uint16, delay TWDelay) {
+	task := &TWTask{
+		ctx:         ctx,
+		callback:    callback,
+		maxAttempts: attempts,
+		delayFunc:   delay,
+	}
+
+	tw.requeue(ctx, taskID, task)
+}
+
+func (tw *timewheel) Stop() {
+	select {
+	case <-tw.stop:
+		tw.logger.Warn(context.Background(), "TimingWheel has stopped")
+
+		return
+	default:
+	}
+
+	close(tw.stop)
+
+	tw.logger.Warn(context.Background(), fmt.Sprintf("TimingWheel stopped at: %s", time.Now().String()))
+}
+
+func (tw *timewheel) requeue(ctx context.Context, taskID string, task *TWTask) {
+	select {
+	case <-tw.stop:
+		tw.logger.Err(ctx, fmt.Sprintf("err task(%s) requeue", taskID), zap.Uint16("attempts", task.attempts+1), zap.Error(errors.New("TimingWheel has stopped")))
+
+		return
+	default:
+	}
+
+	if task.attempts >= task.maxAttempts {
+		tw.logger.Warn(ctx, fmt.Sprintf("task(%s) attempted %d times, giving up", taskID, task.attempts), zap.Uint16("max_attempts", task.maxAttempts))
+
+		return
+	}
+
+	task.attempts++
+
+	duration := task.delayFunc(task.attempts)
+	slot := tw.place(task, duration)
+
+	task.addedAt = time.Now()
+
+	if duration < tw.tick {
+		go tw.run(taskID, task)
+
+		return
+	}
+
+	tw.bucket[slot].Store(taskID, task)
+}
+
+func (tw *timewheel) place(task *TWTask, delay time.Duration) int {
+	tick := tw.tick.Nanoseconds()
+	total := tick * int64(tw.size)
+	duration := delay.Nanoseconds()
+
+	if duration > total {
+		task.round = int(duration / total)
+		duration = duration % total
+
+		if duration == 0 {
+			task.round--
+		}
+	}
+
+	task.remainder = time.Duration(duration % tick)
+
+	return (tw.slot + int(duration/tick)) % tw.size
+}
+
+func (tw *timewheel) scheduler() {
+	ticker := time.NewTicker(tw.tick)
+	defer ticker.Stop()
+
+	for {
+		select {
+		case <-tw.stop:
+			return
+		case <-ticker.C:
+			tw.slot = (tw.slot + 1) % tw.size
+			go tw.process(tw.slot)
+		}
+	}
+}
+
+func (tw *timewheel) process(slot int) {
+	tw.bucket[slot].Range(func(key, value interface{}) bool {
+		select {
+		case <-tw.stop:
+			return false
+		default:
+		}
+
+		taskID := key.(string)
+		task := value.(*TWTask)
+
+		if task.round > 0 {
+			task.round--
+
+			return true
+		}
+
+		go tw.run(taskID, task)
+
+		tw.bucket[slot].Delete(key)
+
+		return true
+	})
+}
+
+func (tw *timewheel) run(taskID string, task *TWTask) {
+	if task.remainder > 0 {
+		time.Sleep(task.remainder)
+	}
+
+	delay := time.Since(task.addedAt).String()
+
+	ctx := tw.ctxNew(task.ctx)
+
+	defer func() {
+		if err := recover(); err != nil {
+			tw.logger.Err(ctx, fmt.Sprintf("task(%s) run panic", taskID), zap.Uint16("attempts", task.attempts), zap.String("delay", delay), zap.Any("error", err), zap.ByteString("stack", debug.Stack()))
+
+			if task.attempts < task.maxAttempts {
+				tw.requeue(ctx, taskID, task)
+			}
+		}
+	}()
+
+	if err := task.callback(ctx, taskID); err != nil {
+		tw.logger.Err(ctx, fmt.Sprintf("err task(%s) run", taskID), zap.Uint16("attempts", task.attempts), zap.String("delay", delay), zap.Error(err))
+
+		if
task.attempts < task.maxAttempts { + tw.requeue(ctx, taskID, task) + } + + return + } + + if tw.debug { + tw.logger.Info(ctx, fmt.Sprintf("task(%s) run ok", taskID), zap.Uint16("attempts", task.attempts), zap.String("delay", delay)) + } +} + +// TWOption timing wheel option. +type TWOption func(tw *timewheel) + +// WithTWCtx clones context for executing tasks asynchronously, the default is `context.Background()`. +func WithTWCtx(fn func(ctx context.Context) context.Context) TWOption { + return func(tw *timewheel) { + tw.ctxNew = fn + } +} + +// WithTWLogger specifies logger for timing wheel. +func WithTWLogger(l CtxLogger) TWOption { + return func(tw *timewheel) { + tw.logger = l + } +} + +// WithTWDebug specifies debug mode for timing wheel. +func WithTWDebug() TWOption { + return func(tw *timewheel) { + tw.debug = true + } +} + +type twLogger struct{} + +func (l *twLogger) Info(ctx context.Context, msg string, fields ...zap.Field) { + logger.Info(fmt.Sprintf("[tw] %s", msg), fields...) +} + +func (l *twLogger) Warn(ctx context.Context, msg string, fields ...zap.Field) { + logger.Warn(fmt.Sprintf("[tw] %s", msg), fields...) +} + +func (l *twLogger) Err(ctx context.Context, msg string, fields ...zap.Field) { + logger.Error(fmt.Sprintf("[tw] %s", msg), fields...) +} + +// NewTimingWheel returns a new timing wheel. +func NewTimingWheel(tick time.Duration, size int, options ...TWOption) TimingWheel { + tw := &timewheel{ + tick: tick, + size: size, + bucket: make([]sync.Map, size), + stop: make(chan struct{}), + ctxNew: func(ctx context.Context) context.Context { + return context.Background() + }, + logger: new(twLogger), + } + + for _, f := range options { + f(tw) + } + + go tw.scheduler() + + return tw +} diff --git a/vendor/github.com/shenghui0779/yiigo/validator.go b/vendor/github.com/shenghui0779/yiigo/validator.go new file mode 100644 index 00000000..e8ef93ed --- /dev/null +++ b/vendor/github.com/shenghui0779/yiigo/validator.go @@ -0,0 +1,167 @@ +package yiigo + +import ( + "context" + "database/sql/driver" + "errors" + "reflect" + "strings" + + "github.com/go-playground/locales/zh" + ut "github.com/go-playground/universal-translator" + "github.com/go-playground/validator/v10" + zhcn "github.com/go-playground/validator/v10/translations/zh" + "go.uber.org/zap" +) + +// ValidatorOption configures how we set up the validator. +type ValidatorOption func(validate *validator.Validate, trans ut.Translator) + +// SetValidateTag allows for changing of the default validate tag name: valid. +func SetValidateTag(tagname string) ValidatorOption { + return func(validate *validator.Validate, trans ut.Translator) { + validate.SetTagName(tagname) + } +} + +// WithValuerType registers a number of custom validate types which implement the driver.Valuer interface. +func WithValuerType(types ...driver.Valuer) ValidatorOption { + customTypes := make([]interface{}, 0, len(types)) + + for _, t := range types { + customTypes = append(customTypes, t) + } + + return func(validate *validator.Validate, trans ut.Translator) { + validate.RegisterCustomTypeFunc(func(field reflect.Value) interface{} { + if valuer, ok := field.Interface().(driver.Valuer); ok { + v, _ := valuer.Value() + + return v + } + + return nil + }, customTypes...) + } +} + +// WithValidation adds a custom validation with the given tag. 
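+// For example (a sketch), a rule that rejects blank strings:
+//
+//	WithValidation("notblank", func(fl validator.FieldLevel) bool {
+//		return len(strings.TrimSpace(fl.Field().String())) != 0
+//	})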
+func WithValidation(tag string, fn validator.Func, callValidationEvenIfNull ...bool) ValidatorOption {
+	return func(validate *validator.Validate, trans ut.Translator) {
+		if err := validate.RegisterValidation(tag, fn, callValidationEvenIfNull...); err != nil {
+			logger.Error("[yiigo] err register validation", zap.Error(err))
+		}
+	}
+}
+
+// WithValidationCtx does the same as WithValidation but accepts a FuncCtx validation, allowing context.Context validation support.
+func WithValidationCtx(tag string, fn validator.FuncCtx, callValidationEvenIfNull ...bool) ValidatorOption {
+	return func(validate *validator.Validate, trans ut.Translator) {
+		if err := validate.RegisterValidationCtx(tag, fn, callValidationEvenIfNull...); err != nil {
+			logger.Error("[yiigo] err register validation with ctx", zap.Error(err))
+		}
+	}
+}
+
+// WithTranslation registers custom validate translation against the provided tag.
+// Param text, e.g.: {0}为必填字段 ({0} is a required field) or {0}必须大于{1} ({0} must be greater than {1})
+func WithTranslation(tag, text string, override bool) ValidatorOption {
+	return func(validate *validator.Validate, trans ut.Translator) {
+		validate.RegisterTranslation(tag, trans, func(ut ut.Translator) error {
+			return ut.Add(tag, text, override)
+		}, func(ut ut.Translator, fe validator.FieldError) string {
+			t, _ := ut.T(tag, fe.Field(), fe.Param())
+
+			return t
+		})
+	}
+}
+
+// Validator is a validator which can be used with Gin.
+type Validator struct {
+	validator  *validator.Validate
+	translator ut.Translator
+}
+
+// ValidateStruct receives any kind of type, but validation is only performed on structs and pointers to structs.
+func (v *Validator) ValidateStruct(obj interface{}) error {
+	if reflect.Indirect(reflect.ValueOf(obj)).Kind() != reflect.Struct {
+		return nil
+	}
+
+	if err := v.validator.Struct(obj); err != nil {
+		e, ok := err.(validator.ValidationErrors)
+
+		if !ok {
+			return err
+		}
+
+		errM := e.Translate(v.translator)
+		msgs := make([]string, 0, len(errM))
+
+		for _, v := range errM {
+			msgs = append(msgs, v)
+		}
+
+		return errors.New(strings.Join(msgs, ";"))
+	}
+
+	return nil
+}
+
+// ValidateStructCtx receives any kind of type, but validation is only performed on structs and pointers to structs; it allows passing a context.Context for contextual validation information.
+func (v *Validator) ValidateStructCtx(ctx context.Context, obj interface{}) error {
+	if reflect.Indirect(reflect.ValueOf(obj)).Kind() != reflect.Struct {
+		return nil
+	}
+
+	if err := v.validator.StructCtx(ctx, obj); err != nil {
+		e, ok := err.(validator.ValidationErrors)
+
+		if !ok {
+			return err
+		}
+
+		errM := e.Translate(v.translator)
+		msgs := make([]string, 0, len(errM))
+
+		for _, v := range errM {
+			msgs = append(msgs, v)
+		}
+
+		return errors.New(strings.Join(msgs, ";"))
+	}
+
+	return nil
+}
+
+// Engine returns the underlying validator engine which powers the default
+// Validator instance. This is useful if you want to register custom validations
+// or struct level validations. See validator GoDoc for more info -
+// https://pkg.go.dev/github.com/go-playground/validator/v10
+func (v *Validator) Engine() interface{} {
+	return v.validator
+}
+
+// NewValidator returns a new validator with default tag name: valid.
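+// Options are applied in order after the built-in zh translations are
+// registered, so a later option can override an earlier one, e.g. (a sketch):
+//
+//	v := NewValidator(
+//		SetValidateTag("binding"),
+//		WithTranslation("required", "{0}为必填字段", true),
+//	)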
+// Used for Gin: binding.Validator = yiigo.NewValidator() +func NewValidator(options ...ValidatorOption) *Validator { + validate := validator.New() + validate.SetTagName("valid") + + zhTrans := zh.New() + trans, _ := ut.New(zhTrans, zhTrans).GetTranslator("zh") + + if err := zhcn.RegisterDefaultTranslations(validate, trans); err != nil { + logger.Error("[yiigo] err validation translator", zap.Error(err)) + } + + for _, f := range options { + f(validate, trans) + } + + return &Validator{ + validator: validate, + translator: trans, + } +} diff --git a/vendor/github.com/shenghui0779/yiigo/websocket.go b/vendor/github.com/shenghui0779/yiigo/websocket.go new file mode 100644 index 00000000..755eea23 --- /dev/null +++ b/vendor/github.com/shenghui0779/yiigo/websocket.go @@ -0,0 +1,241 @@ +package yiigo + +import ( + "context" + "errors" + "fmt" + "net/http" + + "github.com/gorilla/websocket" + "github.com/tidwall/pretty" + "go.uber.org/zap" +) + +var wsupgrader *websocket.Upgrader + +// WSMsg websocket message +type WSMsg interface { + // T returns ws msg type. + T() int + + // V returns ws msg value. + V() []byte +} + +// wsmsg websocket message +type wsmsg struct { + t int + v []byte +} + +func (m *wsmsg) T() int { + return m.t +} + +func (m *wsmsg) V() []byte { + return m.v +} + +// NewWSMsg returns a new ws message. +func NewWSMsg(t int, v []byte) WSMsg { + return &wsmsg{ + t: t, + v: v, + } +} + +// NewWSTextMsg returns a new ws text message. +func NewWSTextMsg(v []byte) WSMsg { + return &wsmsg{ + t: websocket.TextMessage, + v: v, + } +} + +// NewWSBinaryMsg returns a new ws binary message. +func NewWSBinaryMsg(v []byte) WSMsg { + return &wsmsg{ + t: websocket.BinaryMessage, + v: v, + } +} + +// WSHandler the function to handle ws message. +type WSHandler func(ctx context.Context, msg WSMsg) (WSMsg, error) + +// WSConn websocket connection +type WSConn interface { + // Read reads message from ws connection. + Read(ctx context.Context, callback WSHandler) error + + // Write writes message to ws connection. + Write(ctx context.Context, msg WSMsg) error + + // Close closes ws connection. + Close(ctx context.Context) +} + +type wsconn struct { + name string + conn *websocket.Conn + authOK bool + authFunc WSHandler + logger CtxLogger + debug bool +} + +func (c *wsconn) Read(ctx context.Context, callback WSHandler) error { + for { + select { + case <-ctx.Done(): + return ctx.Err() + default: + t, b, err := c.conn.ReadMessage() + + if err != nil { + if websocket.IsCloseError(err, websocket.CloseNormalClosure) { + c.logger.Info(ctx, fmt.Sprintf("conn(%s) closed", c.name), zap.String("msg", err.Error())) + + return nil + } + + if websocket.IsUnexpectedCloseError(err, websocket.CloseNormalClosure) { + c.logger.Warn(ctx, fmt.Sprintf("conn(%s) closed unexpectedly", c.name), zap.String("msg", err.Error())) + + return nil + } + + return err + } + + if c.debug { + c.logger.Info(ctx, fmt.Sprintf("conn(%s) read msg", c.name), zap.Int("msg.T", t), zap.ByteString("msg.V", pretty.Ugly(b))) + } + + var msg WSMsg + + // if `authFunc` is not nil and unauthorized, need to authorize first. 
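+			// The first inbound frame is handed to authFunc; on success, authOK is
+			// set and subsequent frames are dispatched to the normal callback.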
+			if c.authFunc != nil && !c.authOK {
+				msg, err = c.authFunc(ctx, NewWSMsg(t, b))
+
+				if err != nil {
+					msg = NewWSTextMsg([]byte(err.Error()))
+				} else {
+					c.authOK = true
+				}
+			} else {
+				if callback != nil {
+					msg, err = callback(ctx, NewWSMsg(t, b))
+
+					if err != nil {
+						msg = NewWSTextMsg([]byte(err.Error()))
+					}
+				}
+			}
+
+			if msg != nil {
+				if c.debug {
+					c.logger.Info(ctx, fmt.Sprintf("conn(%s) write msg", c.name), zap.Int("msg.T", msg.T()), zap.ByteString("msg.V", pretty.Ugly(msg.V())))
+				}
+
+				if err = c.conn.WriteMessage(msg.T(), msg.V()); err != nil {
+					c.logger.Err(ctx, fmt.Sprintf("err conn(%s) write msg", c.name), zap.Error(err), zap.Int("msg.T", msg.T()), zap.ByteString("msg.V", pretty.Ugly(msg.V())))
+				}
+			}
+		}
+	}
+}
+
+func (c *wsconn) Write(ctx context.Context, msg WSMsg) error {
+	select {
+	case <-ctx.Done():
+		return ctx.Err()
+	default:
+	}
+
+	// if `authFunc` is not nil and the connection is unauthorized, writing is disabled.
+	if c.authFunc != nil && !c.authOK {
+		c.logger.Warn(ctx, fmt.Sprintf("conn(%s) write msg disabled due to unauthorized connection", c.name), zap.Int("msg.T", msg.T()), zap.ByteString("msg.V", pretty.Ugly(msg.V())))
+
+		return nil
+	}
+
+	if c.debug {
+		c.logger.Info(ctx, fmt.Sprintf("conn(%s) write msg", c.name), zap.Int("msg.T", msg.T()), zap.ByteString("msg.V", pretty.Ugly(msg.V())))
+	}
+
+	if err := c.conn.WriteMessage(msg.T(), msg.V()); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (c *wsconn) Close(ctx context.Context) {
+	if err := c.conn.Close(); err != nil {
+		c.logger.Err(ctx, fmt.Sprintf("err close conn(%s)", c.name), zap.Error(err))
+	}
+}
+
+// WSOption ws connection option.
+type WSOption func(c *wsconn)
+
+// WithWSAuth specifies authorization for ws connection.
+func WithWSAuth(fn WSHandler) WSOption {
+	return func(c *wsconn) {
+		c.authFunc = fn
+	}
+}
+
+// WithWSLogger specifies logger for ws connection.
+func WithWSLogger(l CtxLogger) WSOption {
+	return func(c *wsconn) {
+		c.logger = l
+	}
+}
+
+// WithWSDebug specifies debug mode for ws connection.
+func WithWSDebug() WSOption {
+	return func(c *wsconn) {
+		c.debug = true
+	}
+}
+
+type wsLogger struct{}
+
+func (l *wsLogger) Info(ctx context.Context, msg string, fields ...zap.Field) {
+	logger.Info(fmt.Sprintf("[ws] %s", msg), fields...)
+}
+
+func (l *wsLogger) Warn(ctx context.Context, msg string, fields ...zap.Field) {
+	logger.Warn(fmt.Sprintf("[ws] %s", msg), fields...)
+}
+
+func (l *wsLogger) Err(ctx context.Context, msg string, fields ...zap.Field) {
+	logger.Error(fmt.Sprintf("[ws] %s", msg), fields...)
+}
+
+// NewWSConn returns a new ws connection.
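+// A usage sketch inside an HTTP handler (it assumes the package-level
+// upgrader has been configured elsewhere; NewWSConn errors if it is nil):
+//
+//	conn, err := NewWSConn("demo", w, r)
+//	if err != nil {
+//		// handle error
+//	}
+//	defer conn.Close(r.Context())
+//	conn.Read(r.Context(), handler)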
+func NewWSConn(name string, w http.ResponseWriter, r *http.Request, options ...WSOption) (WSConn, error) {
+	if wsupgrader == nil {
+		return nil, errors.New("upgrader is nil (forgot to configure?)")
+	}
+
+	c, err := wsupgrader.Upgrade(w, r, nil)
+
+	if err != nil {
+		return nil, err
+	}
+
+	conn := &wsconn{
+		name:   name,
+		conn:   c,
+		logger: new(wsLogger),
+	}
+
+	for _, f := range options {
+		f(conn)
+	}
+
+	return conn, nil
+}
diff --git a/vendor/github.com/tidwall/pretty/LICENSE b/vendor/github.com/tidwall/pretty/LICENSE
new file mode 100644
index 00000000..993b83f2
--- /dev/null
+++ b/vendor/github.com/tidwall/pretty/LICENSE
@@ -0,0 +1,20 @@
+The MIT License (MIT)
+
+Copyright (c) 2017 Josh Baker
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/tidwall/pretty/README.md b/vendor/github.com/tidwall/pretty/README.md
new file mode 100644
index 00000000..d3be5e54
--- /dev/null
+++ b/vendor/github.com/tidwall/pretty/README.md
@@ -0,0 +1,122 @@
+# Pretty
+
+[![GoDoc](https://img.shields.io/badge/api-reference-blue.svg?style=flat-square)](https://pkg.go.dev/github.com/tidwall/pretty)
+
+Pretty is a Go package that provides [fast](#performance) methods for formatting JSON for human readability, or compacting JSON for smaller payloads.
+
+Getting Started
+===============
+
+## Installing
+
+To start using Pretty, install Go and run `go get`:
+
+```sh
+$ go get -u github.com/tidwall/pretty
+```
+
+This will retrieve the library.
+
+## Pretty
+
+Using this example:
+
+```json
+{"name": {"first":"Tom","last":"Anderson"}, "age":37,
+"children": ["Sara","Alex","Jack"],
+"fav.movie": "Deer Hunter", "friends": [
+  {"first": "Janet", "last": "Murphy", "age": 44}
+  ]}
+```
+
+The following code:
+```go
+result = pretty.Pretty(example)
+```
+
+Will format the json to:
+
+```json
+{
+  "name": {
+    "first": "Tom",
+    "last": "Anderson"
+  },
+  "age": 37,
+  "children": ["Sara", "Alex", "Jack"],
+  "fav.movie": "Deer Hunter",
+  "friends": [
+    {
+      "first": "Janet",
+      "last": "Murphy",
+      "age": 44
+    }
+  ]
+}
+```
+
+## Color
+
+Color will colorize the json for outputting to the screen.
+
+```go
+result = pretty.Color(json, nil)
+```
+
+Will add color to the result for printing to the terminal.
+The second param is used for customizing the style, and passing nil will use the default `pretty.TerminalStyle`.
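+
+For example (a sketch), the two can be chained for terminal output:
+
+```go
+os.Stdout.Write(pretty.Color(pretty.Pretty(json), nil))
+```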
+
+## Ugly
+
+The following code:
+```go
+result = pretty.Ugly(example)
+```
+
+Will format the json to:
+
+```json
+{"name":{"first":"Tom","last":"Anderson"},"age":37,"children":["Sara","Alex","Jack"],"fav.movie":"Deer Hunter","friends":[{"first":"Janet","last":"Murphy","age":44}]}
+```
+
+## Customized output
+
+There's a `PrettyOptions(json, opts)` function which allows for customizing the output with the following options:
+
+```go
+type Options struct {
+	// Width is a max column width for single line arrays
+	// Default is 80
+	Width int
+	// Prefix is a prefix for all lines
+	// Default is an empty string
+	Prefix string
+	// Indent is the nested indentation
+	// Default is two spaces
+	Indent string
+	// SortKeys will sort the keys alphabetically
+	// Default is false
+	SortKeys bool
+}
+```
+## Performance
+
+Benchmarks of Pretty alongside the builtin `encoding/json` Indent/Compact methods.
+```
+BenchmarkPretty-16            1000000   1034 ns/op    720 B/op    2 allocs/op
+BenchmarkPrettySortKeys-16     586797   1983 ns/op   2848 B/op   14 allocs/op
+BenchmarkUgly-16              4652365    254 ns/op    240 B/op    1 allocs/op
+BenchmarkUglyInPlace-16       6481233    183 ns/op      0 B/op    0 allocs/op
+BenchmarkJSONIndent-16         450654   2687 ns/op   1221 B/op    0 allocs/op
+BenchmarkJSONCompact-16        685111   1699 ns/op    442 B/op    0 allocs/op
+```
+
+*These benchmarks were run on a MacBook Pro 2.4 GHz 8-Core Intel Core i9.*
+
+## Contact
+Josh Baker [@tidwall](http://twitter.com/tidwall)
+
+## License
+
+Pretty source code is available under the MIT [License](/LICENSE).
+
diff --git a/vendor/github.com/tidwall/pretty/pretty.go b/vendor/github.com/tidwall/pretty/pretty.go
new file mode 100644
index 00000000..f3f756aa
--- /dev/null
+++ b/vendor/github.com/tidwall/pretty/pretty.go
@@ -0,0 +1,674 @@
+package pretty
+
+import (
+	"bytes"
+	"encoding/json"
+	"sort"
+	"strconv"
+)
+
+// Options is Pretty options
+type Options struct {
+	// Width is a max column width for single line arrays
+	// Default is 80
+	Width int
+	// Prefix is a prefix for all lines
+	// Default is an empty string
+	Prefix string
+	// Indent is the nested indentation
+	// Default is two spaces
+	Indent string
+	// SortKeys will sort the keys alphabetically
+	// Default is false
+	SortKeys bool
+}
+
+// DefaultOptions is the default options for pretty formats.
+var DefaultOptions = &Options{Width: 80, Prefix: "", Indent: "  ", SortKeys: false}
+
+// Pretty converts the input json into a more human readable format where each
+// element is on its own line with clear indentation.
+func Pretty(json []byte) []byte { return PrettyOptions(json, nil) }
+
+// PrettyOptions is like Pretty but with customized options.
+func PrettyOptions(json []byte, opts *Options) []byte {
+	if opts == nil {
+		opts = DefaultOptions
+	}
+	buf := make([]byte, 0, len(json))
+	if len(opts.Prefix) != 0 {
+		buf = append(buf, opts.Prefix...)
+	}
+	buf, _, _, _ = appendPrettyAny(buf, json, 0, true,
+		opts.Width, opts.Prefix, opts.Indent, opts.SortKeys,
+		0, 0, -1)
+	if len(buf) > 0 {
+		buf = append(buf, '\n')
+	}
+	return buf
+}
+
+// Ugly removes insignificant space characters from the input json byte slice
+// and returns the compacted result.
+func Ugly(json []byte) []byte {
+	buf := make([]byte, 0, len(json))
+	return ugly(buf, json)
+}
+
+// UglyInPlace removes insignificant space characters from the input json
+// byte slice and returns the compacted result. This method reuses the
+// input json buffer to avoid allocations. Do not use the original bytes
+// slice upon return.
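+// For example (a sketch):
+//
+//	b := []byte(`{ "name": "Tom" }`)
+//	b = UglyInPlace(b) // b is now {"name":"Tom"}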
+func UglyInPlace(json []byte) []byte { return ugly(json, json) } + +func ugly(dst, src []byte) []byte { + dst = dst[:0] + for i := 0; i < len(src); i++ { + if src[i] > ' ' { + dst = append(dst, src[i]) + if src[i] == '"' { + for i = i + 1; i < len(src); i++ { + dst = append(dst, src[i]) + if src[i] == '"' { + j := i - 1 + for ; ; j-- { + if src[j] != '\\' { + break + } + } + if (j-i)%2 != 0 { + break + } + } + } + } + } + } + return dst +} + +func isNaNOrInf(src []byte) bool { + return src[0] == 'i' || //Inf + src[0] == 'I' || // inf + src[0] == '+' || // +Inf + src[0] == 'N' || // Nan + (src[0] == 'n' && len(src) > 1 && src[1] != 'u') // nan +} + +func appendPrettyAny(buf, json []byte, i int, pretty bool, width int, prefix, indent string, sortkeys bool, tabs, nl, max int) ([]byte, int, int, bool) { + for ; i < len(json); i++ { + if json[i] <= ' ' { + continue + } + if json[i] == '"' { + return appendPrettyString(buf, json, i, nl) + } + + if (json[i] >= '0' && json[i] <= '9') || json[i] == '-' || isNaNOrInf(json[i:]) { + return appendPrettyNumber(buf, json, i, nl) + } + if json[i] == '{' { + return appendPrettyObject(buf, json, i, '{', '}', pretty, width, prefix, indent, sortkeys, tabs, nl, max) + } + if json[i] == '[' { + return appendPrettyObject(buf, json, i, '[', ']', pretty, width, prefix, indent, sortkeys, tabs, nl, max) + } + switch json[i] { + case 't': + return append(buf, 't', 'r', 'u', 'e'), i + 4, nl, true + case 'f': + return append(buf, 'f', 'a', 'l', 's', 'e'), i + 5, nl, true + case 'n': + return append(buf, 'n', 'u', 'l', 'l'), i + 4, nl, true + } + } + return buf, i, nl, true +} + +type pair struct { + kstart, kend int + vstart, vend int +} + +type byKeyVal struct { + sorted bool + json []byte + buf []byte + pairs []pair +} + +func (arr *byKeyVal) Len() int { + return len(arr.pairs) +} +func (arr *byKeyVal) Less(i, j int) bool { + if arr.isLess(i, j, byKey) { + return true + } + if arr.isLess(j, i, byKey) { + return false + } + return arr.isLess(i, j, byVal) +} +func (arr *byKeyVal) Swap(i, j int) { + arr.pairs[i], arr.pairs[j] = arr.pairs[j], arr.pairs[i] + arr.sorted = true +} + +type byKind int + +const ( + byKey byKind = 0 + byVal byKind = 1 +) + +type jtype int + +const ( + jnull jtype = iota + jfalse + jnumber + jstring + jtrue + jjson +) + +func getjtype(v []byte) jtype { + if len(v) == 0 { + return jnull + } + switch v[0] { + case '"': + return jstring + case 'f': + return jfalse + case 't': + return jtrue + case 'n': + return jnull + case '[', '{': + return jjson + default: + return jnumber + } +} + +func (arr *byKeyVal) isLess(i, j int, kind byKind) bool { + k1 := arr.json[arr.pairs[i].kstart:arr.pairs[i].kend] + k2 := arr.json[arr.pairs[j].kstart:arr.pairs[j].kend] + var v1, v2 []byte + if kind == byKey { + v1 = k1 + v2 = k2 + } else { + v1 = bytes.TrimSpace(arr.buf[arr.pairs[i].vstart:arr.pairs[i].vend]) + v2 = bytes.TrimSpace(arr.buf[arr.pairs[j].vstart:arr.pairs[j].vend]) + if len(v1) >= len(k1)+1 { + v1 = bytes.TrimSpace(v1[len(k1)+1:]) + } + if len(v2) >= len(k2)+1 { + v2 = bytes.TrimSpace(v2[len(k2)+1:]) + } + } + t1 := getjtype(v1) + t2 := getjtype(v2) + if t1 < t2 { + return true + } + if t1 > t2 { + return false + } + if t1 == jstring { + s1 := parsestr(v1) + s2 := parsestr(v2) + return string(s1) < string(s2) + } + if t1 == jnumber { + n1, _ := strconv.ParseFloat(string(v1), 64) + n2, _ := strconv.ParseFloat(string(v2), 64) + return n1 < n2 + } + return string(v1) < string(v2) + +} + +func parsestr(s []byte) []byte { + for i := 1; i < len(s); i++ { + 
if s[i] == '\\' { + var str string + json.Unmarshal(s, &str) + return []byte(str) + } + if s[i] == '"' { + return s[1:i] + } + } + return nil +} + +func appendPrettyObject(buf, json []byte, i int, open, close byte, pretty bool, width int, prefix, indent string, sortkeys bool, tabs, nl, max int) ([]byte, int, int, bool) { + var ok bool + if width > 0 { + if pretty && open == '[' && max == -1 { + // here we try to create a single line array + max := width - (len(buf) - nl) + if max > 3 { + s1, s2 := len(buf), i + buf, i, _, ok = appendPrettyObject(buf, json, i, '[', ']', false, width, prefix, "", sortkeys, 0, 0, max) + if ok && len(buf)-s1 <= max { + return buf, i, nl, true + } + buf = buf[:s1] + i = s2 + } + } else if max != -1 && open == '{' { + return buf, i, nl, false + } + } + buf = append(buf, open) + i++ + var pairs []pair + if open == '{' && sortkeys { + pairs = make([]pair, 0, 8) + } + var n int + for ; i < len(json); i++ { + if json[i] <= ' ' { + continue + } + if json[i] == close { + if pretty { + if open == '{' && sortkeys { + buf = sortPairs(json, buf, pairs) + } + if n > 0 { + nl = len(buf) + if buf[nl-1] == ' ' { + buf[nl-1] = '\n' + } else { + buf = append(buf, '\n') + } + } + if buf[len(buf)-1] != open { + buf = appendTabs(buf, prefix, indent, tabs) + } + } + buf = append(buf, close) + return buf, i + 1, nl, open != '{' + } + if open == '[' || json[i] == '"' { + if n > 0 { + buf = append(buf, ',') + if width != -1 && open == '[' { + buf = append(buf, ' ') + } + } + var p pair + if pretty { + nl = len(buf) + if buf[nl-1] == ' ' { + buf[nl-1] = '\n' + } else { + buf = append(buf, '\n') + } + if open == '{' && sortkeys { + p.kstart = i + p.vstart = len(buf) + } + buf = appendTabs(buf, prefix, indent, tabs+1) + } + if open == '{' { + buf, i, nl, _ = appendPrettyString(buf, json, i, nl) + if sortkeys { + p.kend = i + } + buf = append(buf, ':') + if pretty { + buf = append(buf, ' ') + } + } + buf, i, nl, ok = appendPrettyAny(buf, json, i, pretty, width, prefix, indent, sortkeys, tabs+1, nl, max) + if max != -1 && !ok { + return buf, i, nl, false + } + if pretty && open == '{' && sortkeys { + p.vend = len(buf) + if p.kstart > p.kend || p.vstart > p.vend { + // bad data. disable sorting + sortkeys = false + } else { + pairs = append(pairs, p) + } + } + i-- + n++ + } + } + return buf, i, nl, open != '{' +} +func sortPairs(json, buf []byte, pairs []pair) []byte { + if len(pairs) == 0 { + return buf + } + vstart := pairs[0].vstart + vend := pairs[len(pairs)-1].vend + arr := byKeyVal{false, json, buf, pairs} + sort.Stable(&arr) + if !arr.sorted { + return buf + } + nbuf := make([]byte, 0, vend-vstart) + for i, p := range pairs { + nbuf = append(nbuf, buf[p.vstart:p.vend]...) + if i < len(pairs)-1 { + nbuf = append(nbuf, ',') + nbuf = append(nbuf, '\n') + } + } + return append(buf[:vstart], nbuf...) 
+} + +func appendPrettyString(buf, json []byte, i, nl int) ([]byte, int, int, bool) { + s := i + i++ + for ; i < len(json); i++ { + if json[i] == '"' { + var sc int + for j := i - 1; j > s; j-- { + if json[j] == '\\' { + sc++ + } else { + break + } + } + if sc%2 == 1 { + continue + } + i++ + break + } + } + return append(buf, json[s:i]...), i, nl, true +} + +func appendPrettyNumber(buf, json []byte, i, nl int) ([]byte, int, int, bool) { + s := i + i++ + for ; i < len(json); i++ { + if json[i] <= ' ' || json[i] == ',' || json[i] == ':' || json[i] == ']' || json[i] == '}' { + break + } + } + return append(buf, json[s:i]...), i, nl, true +} + +func appendTabs(buf []byte, prefix, indent string, tabs int) []byte { + if len(prefix) != 0 { + buf = append(buf, prefix...) + } + if len(indent) == 2 && indent[0] == ' ' && indent[1] == ' ' { + for i := 0; i < tabs; i++ { + buf = append(buf, ' ', ' ') + } + } else { + for i := 0; i < tabs; i++ { + buf = append(buf, indent...) + } + } + return buf +} + +// Style is the color style +type Style struct { + Key, String, Number [2]string + True, False, Null [2]string + Escape [2]string + Append func(dst []byte, c byte) []byte +} + +func hexp(p byte) byte { + switch { + case p < 10: + return p + '0' + default: + return (p - 10) + 'a' + } +} + +// TerminalStyle is for terminals +var TerminalStyle *Style + +func init() { + TerminalStyle = &Style{ + Key: [2]string{"\x1B[94m", "\x1B[0m"}, + String: [2]string{"\x1B[92m", "\x1B[0m"}, + Number: [2]string{"\x1B[93m", "\x1B[0m"}, + True: [2]string{"\x1B[96m", "\x1B[0m"}, + False: [2]string{"\x1B[96m", "\x1B[0m"}, + Null: [2]string{"\x1B[91m", "\x1B[0m"}, + Escape: [2]string{"\x1B[35m", "\x1B[0m"}, + Append: func(dst []byte, c byte) []byte { + if c < ' ' && (c != '\r' && c != '\n' && c != '\t' && c != '\v') { + dst = append(dst, "\\u00"...) + dst = append(dst, hexp((c>>4)&0xF)) + return append(dst, hexp((c)&0xF)) + } + return append(dst, c) + }, + } +} + +// Color will colorize the json. The style parma is used for customizing +// the colors. Passing nil to the style param will use the default +// TerminalStyle. +func Color(src []byte, style *Style) []byte { + if style == nil { + style = TerminalStyle + } + apnd := style.Append + if apnd == nil { + apnd = func(dst []byte, c byte) []byte { + return append(dst, c) + } + } + type stackt struct { + kind byte + key bool + } + var dst []byte + var stack []stackt + for i := 0; i < len(src); i++ { + if src[i] == '"' { + key := len(stack) > 0 && stack[len(stack)-1].key + if key { + dst = append(dst, style.Key[0]...) + } else { + dst = append(dst, style.String[0]...) + } + dst = apnd(dst, '"') + esc := false + uesc := 0 + for i = i + 1; i < len(src); i++ { + if src[i] == '\\' { + if key { + dst = append(dst, style.Key[1]...) + } else { + dst = append(dst, style.String[1]...) + } + dst = append(dst, style.Escape[0]...) + dst = apnd(dst, src[i]) + esc = true + if i+1 < len(src) && src[i+1] == 'u' { + uesc = 5 + } else { + uesc = 1 + } + } else if esc { + dst = apnd(dst, src[i]) + if uesc == 1 { + esc = false + dst = append(dst, style.Escape[1]...) + if key { + dst = append(dst, style.Key[0]...) + } else { + dst = append(dst, style.String[0]...) + } + } else { + uesc-- + } + } else { + dst = apnd(dst, src[i]) + } + if src[i] == '"' { + j := i - 1 + for ; ; j-- { + if src[j] != '\\' { + break + } + } + if (j-i)%2 != 0 { + break + } + } + } + if esc { + dst = append(dst, style.Escape[1]...) + } else if key { + dst = append(dst, style.Key[1]...) 
+			} else {
+				dst = append(dst, style.String[1]...)
+			}
+		} else if src[i] == '{' || src[i] == '[' {
+			stack = append(stack, stackt{src[i], src[i] == '{'})
+			dst = apnd(dst, src[i])
+		} else if (src[i] == '}' || src[i] == ']') && len(stack) > 0 {
+			stack = stack[:len(stack)-1]
+			dst = apnd(dst, src[i])
+		} else if (src[i] == ':' || src[i] == ',') && len(stack) > 0 && stack[len(stack)-1].kind == '{' {
+			stack[len(stack)-1].key = !stack[len(stack)-1].key
+			dst = apnd(dst, src[i])
+		} else {
+			var kind byte
+			if (src[i] >= '0' && src[i] <= '9') || src[i] == '-' || isNaNOrInf(src[i:]) {
+				kind = '0'
+				dst = append(dst, style.Number[0]...)
+			} else if src[i] == 't' {
+				kind = 't'
+				dst = append(dst, style.True[0]...)
+			} else if src[i] == 'f' {
+				kind = 'f'
+				dst = append(dst, style.False[0]...)
+			} else if src[i] == 'n' {
+				kind = 'n'
+				dst = append(dst, style.Null[0]...)
+			} else {
+				dst = apnd(dst, src[i])
+			}
+			if kind != 0 {
+				for ; i < len(src); i++ {
+					if src[i] <= ' ' || src[i] == ',' || src[i] == ':' || src[i] == ']' || src[i] == '}' {
+						i--
+						break
+					}
+					dst = apnd(dst, src[i])
+				}
+				if kind == '0' {
+					dst = append(dst, style.Number[1]...)
+				} else if kind == 't' {
+					dst = append(dst, style.True[1]...)
+				} else if kind == 'f' {
+					dst = append(dst, style.False[1]...)
+				} else if kind == 'n' {
+					dst = append(dst, style.Null[1]...)
+				}
+			}
+		}
+	}
+	return dst
+}
+
+// Spec strips out comments and trailing commas and converts the input to
+// valid JSON per the official spec: https://tools.ietf.org/html/rfc8259
+//
+// The resulting JSON will always be the same length as the input and it will
+// include all of the same line breaks at matching offsets. This is to ensure
+// the result can be later processed by an external parser and that the
+// parser will report messages or errors with the correct offsets.
+func Spec(src []byte) []byte {
+	return spec(src, nil)
+}
+
+// SpecInPlace is the same as Spec, but this method reuses the input json
+// buffer to avoid allocations. Do not use the original bytes slice upon return.
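+// For example (a sketch):
+//
+//	b := []byte(`{"a":1,} // x`)
+//	b = SpecInPlace(b) // b is now `{"a":1 }     ` (same length, comment blanked)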
+func SpecInPlace(src []byte) []byte { + return spec(src, src) +} + +func spec(src, dst []byte) []byte { + dst = dst[:0] + for i := 0; i < len(src); i++ { + if src[i] == '/' { + if i < len(src)-1 { + if src[i+1] == '/' { + dst = append(dst, ' ', ' ') + i += 2 + for ; i < len(src); i++ { + if src[i] == '\n' { + dst = append(dst, '\n') + break + } else if src[i] == '\t' || src[i] == '\r' { + dst = append(dst, src[i]) + } else { + dst = append(dst, ' ') + } + } + continue + } + if src[i+1] == '*' { + dst = append(dst, ' ', ' ') + i += 2 + for ; i < len(src)-1; i++ { + if src[i] == '*' && src[i+1] == '/' { + dst = append(dst, ' ', ' ') + i++ + break + } else if src[i] == '\n' || src[i] == '\t' || + src[i] == '\r' { + dst = append(dst, src[i]) + } else { + dst = append(dst, ' ') + } + } + continue + } + } + } + dst = append(dst, src[i]) + if src[i] == '"' { + for i = i + 1; i < len(src); i++ { + dst = append(dst, src[i]) + if src[i] == '"' { + j := i - 1 + for ; ; j-- { + if src[j] != '\\' { + break + } + } + if (j-i)%2 != 0 { + break + } + } + } + } else if src[i] == '}' || src[i] == ']' { + for j := len(dst) - 2; j >= 0; j-- { + if dst[j] <= ' ' { + continue + } + if dst[j] == ',' { + dst[j] = ' ' + } + break + } + } + } + return dst +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/bsoncodec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/bsoncodec.go index 96195bcc..098ed69f 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/bsoncodec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/bsoncodec.go @@ -13,6 +13,7 @@ import ( "go.mongodb.org/mongo-driver/bson/bsonrw" "go.mongodb.org/mongo-driver/bson/bsontype" + "go.mongodb.org/mongo-driver/bson/primitive" ) var ( @@ -118,11 +119,32 @@ type EncodeContext struct { type DecodeContext struct { *Registry Truncate bool + // Ancestor is the type of a containing document. This is mainly used to determine what type // should be used when decoding an embedded document into an empty interface. For example, if // Ancestor is a bson.M, BSON embedded document values being decoded into an empty interface // will be decoded into a bson.M. + // + // Deprecated: Use DefaultDocumentM or DefaultDocumentD instead. Ancestor reflect.Type + + // defaultDocumentType specifies the Go type to decode top-level and nested BSON documents into. In particular, the + // usage for this field is restricted to data typed as "interface{}" or "map[string]interface{}". If DocumentType is + // set to a type that a BSON document cannot be unmarshaled into (e.g. "string"), unmarshalling will result in an + // error. DocumentType overrides the Ancestor field. + defaultDocumentType reflect.Type +} + +// DefaultDocumentM will decode empty documents using the primitive.M type. This behavior is restricted to data typed as +// "interface{}" or "map[string]interface{}". +func (dc *DecodeContext) DefaultDocumentM() { + dc.defaultDocumentType = reflect.TypeOf(primitive.M{}) +} + +// DefaultDocumentD will decode empty documents using the primitive.D type. This behavior is restricted to data typed as +// "interface{}" or "map[string]interface{}". 
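+// For example (a sketch; DecodeContext embeds a Registry):
+//
+//	dc := bsoncodec.DecodeContext{Registry: bson.DefaultRegistry}
+//	dc.DefaultDocumentD() // nested documents now decode as primitive.D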
+func (dc *DecodeContext) DefaultDocumentD() { + dc.defaultDocumentType = reflect.TypeOf(primitive.D{}) } // ValueCodec is the interface that groups the methods to encode and decode diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go index 20f4797d..e95cab58 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go @@ -1463,7 +1463,7 @@ func (dvd DefaultValueDecoders) ValueUnmarshalerDecodeValue(dc DecodeContext, vr if !val.CanAddr() { return ValueDecoderError{Name: "ValueUnmarshalerDecodeValue", Types: []reflect.Type{tValueUnmarshaler}, Received: val} } - val = val.Addr() // If they type doesn't implement the interface, a pointer to it must. + val = val.Addr() // If the type doesn't implement the interface, a pointer to it must. } t, src, err := bsonrw.Copier{}.CopyValueToBytes(vr) @@ -1492,13 +1492,6 @@ func (dvd DefaultValueDecoders) UnmarshalerDecodeValue(dc DecodeContext, vr bson val.Set(reflect.New(val.Type().Elem())) } - if !val.Type().Implements(tUnmarshaler) { - if !val.CanAddr() { - return ValueDecoderError{Name: "UnmarshalerDecodeValue", Types: []reflect.Type{tUnmarshaler}, Received: val} - } - val = val.Addr() // If they type doesn't implement the interface, a pointer to it must. - } - _, src, err := bsonrw.Copier{}.CopyValueToBytes(vr) if err != nil { return err @@ -1516,6 +1509,13 @@ func (dvd DefaultValueDecoders) UnmarshalerDecodeValue(dc DecodeContext, vr bson return nil } + if !val.Type().Implements(tUnmarshaler) { + if !val.CanAddr() { + return ValueDecoderError{Name: "UnmarshalerDecodeValue", Types: []reflect.Type{tUnmarshaler}, Received: val} + } + val = val.Addr() // If the type doesn't implement the interface, a pointer to it must. + } + fn := val.Convert(tUnmarshaler).MethodByName("UnmarshalBSON") errVal := fn.Call([]reflect.Value{reflect.ValueOf(src)})[0] if !errVal.IsNil() { diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/doc.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/doc.go index c1e20f94..b0ae0e23 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/doc.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/doc.go @@ -1,3 +1,9 @@ +// Copyright (C) MongoDB, Inc. 2022-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + // Package bsoncodec provides a system for encoding values to BSON representations and decoding // values from BSON representations. This package considers both binary BSON and ExtendedJSON as // BSON representations. 
The types in this package enable a flexible system for handling this diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/empty_interface_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/empty_interface_codec.go index a15636d0..eda417cf 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/empty_interface_codec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/empty_interface_codec.go @@ -57,11 +57,18 @@ func (eic EmptyInterfaceCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWrit func (eic EmptyInterfaceCodec) getEmptyInterfaceDecodeType(dc DecodeContext, valueType bsontype.Type) (reflect.Type, error) { isDocument := valueType == bsontype.Type(0) || valueType == bsontype.EmbeddedDocument - if isDocument && dc.Ancestor != nil { - // Using ancestor information rather than looking up the type map entry forces consistent decoding. - // If we're decoding into a bson.D, subdocuments should also be decoded as bson.D, even if a type map entry - // has been registered. - return dc.Ancestor, nil + if isDocument { + if dc.defaultDocumentType != nil { + // If the bsontype is an embedded document and the DocumentType is set on the DecodeContext, then return + // that type. + return dc.defaultDocumentType, nil + } + if dc.Ancestor != nil { + // Using ancestor information rather than looking up the type map entry forces consistent decoding. + // If we're decoding into a bson.D, subdocuments should also be decoded as bson.D, even if a type map entry + // has been registered. + return dc.Ancestor, nil + } } rtype, err := dc.LookupTypeMapEntry(valueType) diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go index 1f7acbcf..e1fbef9c 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go @@ -7,6 +7,7 @@ package bsoncodec import ( + "encoding" "fmt" "reflect" "strconv" @@ -230,6 +231,19 @@ func (mc *MapCodec) encodeKey(val reflect.Value) (string, error) { } return "", err } + // keys implement encoding.TextMarshaler are marshaled. + if km, ok := val.Interface().(encoding.TextMarshaler); ok { + if val.Kind() == reflect.Ptr && val.IsNil() { + return "", nil + } + + buf, err := km.MarshalText() + if err != nil { + return "", err + } + + return string(buf), nil + } switch val.Kind() { case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: @@ -241,6 +255,7 @@ func (mc *MapCodec) encodeKey(val reflect.Value) (string, error) { } var keyUnmarshalerType = reflect.TypeOf((*KeyUnmarshaler)(nil)).Elem() +var textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() func (mc *MapCodec) decodeKey(key string, keyType reflect.Type) (reflect.Value, error) { keyVal := reflect.ValueOf(key) @@ -252,6 +267,12 @@ func (mc *MapCodec) decodeKey(key string, keyType reflect.Type) (reflect.Value, v := keyVal.Interface().(KeyUnmarshaler) err = v.UnmarshalKey(key) keyVal = keyVal.Elem() + // Try to decode encoding.TextUnmarshalers. 
+ case reflect.PtrTo(keyType).Implements(textUnmarshalerType): + keyVal = reflect.New(keyType) + v := keyVal.Interface().(encoding.TextUnmarshaler) + err = v.UnmarshalText([]byte(key)) + keyVal = keyVal.Elem() // Otherwise, go to type specific behavior default: switch keyType.Kind() { diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_reader.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_reader.go index 458588b6..ef5d837c 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_reader.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_reader.go @@ -86,12 +86,11 @@ type valueReader struct { // NewBSONDocumentReader returns a ValueReader using b for the underlying BSON // representation. Parameter b must be a BSON Document. -// -// TODO(skriptble): There's a lack of symmetry between the reader and writer, since the reader takes -// a []byte while the writer takes an io.Writer. We should have two versions of each, one that takes -// a []byte and one that takes an io.Reader or io.Writer. The []byte version will need to return a -// thing that can return the finished []byte since it might be reallocated when appended to. func NewBSONDocumentReader(b []byte) ValueReader { + // TODO(skriptble): There's a lack of symmetry between the reader and writer, since the reader takes a []byte while the + // TODO writer takes an io.Writer. We should have two versions of each, one that takes a []byte and one that takes an + // TODO io.Reader or io.Writer. The []byte version will need to return a thing that can return the finished []byte since + // TODO it might be reallocated when appended to. return newValueReader(b) } diff --git a/vendor/go.mongodb.org/mongo-driver/bson/decoder.go b/vendor/go.mongodb.org/mongo-driver/bson/decoder.go index 7f6b7694..6e189fa5 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/decoder.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/decoder.go @@ -33,6 +33,11 @@ var decPool = sync.Pool{ type Decoder struct { dc bsoncodec.DecodeContext vr bsonrw.ValueReader + + // We persist defaultDocumentM and defaultDocumentD on the Decoder to prevent overwriting from + // (*Decoder).SetContext. + defaultDocumentM bool + defaultDocumentD bool } // NewDecoder returns a new decoder that uses the DefaultRegistry to read from vr. @@ -95,6 +100,12 @@ func (d *Decoder) Decode(val interface{}) error { if err != nil { return err } + if d.defaultDocumentM { + d.dc.DefaultDocumentM() + } + if d.defaultDocumentD { + d.dc.DefaultDocumentD() + } return decoder.DecodeValue(d.dc, d.vr, rval) } @@ -116,3 +127,15 @@ func (d *Decoder) SetContext(dc bsoncodec.DecodeContext) error { d.dc = dc return nil } + +// DefaultDocumentM will decode empty documents using the primitive.M type. This behavior is restricted to data typed as +// "interface{}" or "map[string]interface{}". +func (d *Decoder) DefaultDocumentM() { + d.defaultDocumentM = true +} + +// DefaultDocumentD will decode empty documents using the primitive.D type. This behavior is restricted to data typed as +// "interface{}" or "map[string]interface{}". +func (d *Decoder) DefaultDocumentD() { + d.defaultDocumentD = true +} diff --git a/vendor/go.mongodb.org/mongo-driver/internal/csfle_util.go b/vendor/go.mongodb.org/mongo-driver/internal/csfle_util.go new file mode 100644 index 00000000..635d8e35 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/internal/csfle_util.go @@ -0,0 +1,39 @@ +// Copyright (C) MongoDB, Inc. 2022-present. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package internal + +import ( + "fmt" + + "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" +) + +const ( + EncryptedCacheCollection = "ecc" + EncryptedStateCollection = "esc" + EncryptedCompactionCollection = "ecoc" +) + +// GetEncryptedStateCollectionName returns the encrypted state collection name associated with dataCollectionName. +func GetEncryptedStateCollectionName(efBSON bsoncore.Document, dataCollectionName string, stateCollection string) (string, error) { + fieldName := stateCollection + "Collection" + val, err := efBSON.LookupErr(fieldName) + if err != nil { + if err != bsoncore.ErrElementNotFound { + return "", err + } + // Return default name. + defaultName := "enxcol_." + dataCollectionName + "." + stateCollection + return defaultName, nil + } + + stateCollectionName, ok := val.StringValueOK() + if !ok { + return "", fmt.Errorf("expected string for '%v', got: %v", fieldName, val.Type) + } + return stateCollectionName, nil +} diff --git a/vendor/go.mongodb.org/mongo-driver/internal/csot_util.go b/vendor/go.mongodb.org/mongo-driver/internal/csot_util.go new file mode 100644 index 00000000..ea07637b --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/internal/csot_util.go @@ -0,0 +1,34 @@ +// Copyright (C) MongoDB, Inc. 2022-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package internal + +import ( + "context" + "time" +) + +type timeoutKey struct{} + +// MakeTimeoutContext returns a new context with Client-Side Operation Timeout (CSOT) feature-gated behavior +// and a Timeout set to the passed in Duration. Setting a Timeout on a single operation is not supported in +// public API. +// +// TODO(GODRIVER-2348) We may be able to remove this function once CSOT feature-gated behavior becomes the +// TODO default behavior. +func MakeTimeoutContext(ctx context.Context, to time.Duration) (context.Context, context.CancelFunc) { + // Only use the passed in Duration as a timeout on the Context if it + // is non-zero. + cancelFunc := func() {} + if to != 0 { + ctx, cancelFunc = context.WithTimeout(ctx, to) + } + return context.WithValue(ctx, timeoutKey{}, true), cancelFunc +} + +func IsTimeoutContext(ctx context.Context) bool { + return ctx.Value(timeoutKey{}) != nil +} diff --git a/vendor/go.mongodb.org/mongo-driver/internal/error.go b/vendor/go.mongodb.org/mongo-driver/internal/error.go index 6a105af4..1fec3f18 100644 --- a/vendor/go.mongodb.org/mongo-driver/internal/error.go +++ b/vendor/go.mongodb.org/mongo-driver/internal/error.go @@ -117,3 +117,7 @@ func (e *wrappedError) Error() string { func (e *wrappedError) Inner() error { return e.inner } + +func (e *wrappedError) Unwrap() error { + return e.inner +} diff --git a/vendor/go.mongodb.org/mongo-driver/internal/randutil/rand/bits.go b/vendor/go.mongodb.org/mongo-driver/internal/randutil/rand/bits.go new file mode 100644 index 00000000..44790091 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/internal/randutil/rand/bits.go @@ -0,0 +1,38 @@ +// Copied from https://cs.opensource.google/go/go/+/946b4baaf6521d521928500b2b57429c149854e7:src/math/bits.go + +// Copyright 2017 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package rand + +// Add64 returns the sum with carry of x, y and carry: sum = x + y + carry. +// The carry input must be 0 or 1; otherwise the behavior is undefined. +// The carryOut output is guaranteed to be 0 or 1. +func Add64(x, y, carry uint64) (sum, carryOut uint64) { + yc := y + carry + sum = x + yc + if sum < x || yc < y { + carryOut = 1 + } + return +} + +// Mul64 returns the 128-bit product of x and y: (hi, lo) = x * y +// with the product bits' upper half returned in hi and the lower +// half returned in lo. +func Mul64(x, y uint64) (hi, lo uint64) { + const mask32 = 1<<32 - 1 + x0 := x & mask32 + x1 := x >> 32 + y0 := y & mask32 + y1 := y >> 32 + w0 := x0 * y0 + t := x1*y0 + w0>>32 + w1 := t & mask32 + w2 := t >> 32 + w1 += x0 * y1 + hi = x1*y1 + w2 + w1>>32 + lo = x * y + return +} diff --git a/vendor/go.mongodb.org/mongo-driver/internal/randutil/rand/exp.go b/vendor/go.mongodb.org/mongo-driver/internal/randutil/rand/exp.go new file mode 100644 index 00000000..859e4e0e --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/internal/randutil/rand/exp.go @@ -0,0 +1,223 @@ +// Copied from https://cs.opensource.google/go/x/exp/+/24438e51023af3bfc1db8aed43c1342817e8cfcd:rand/exp.go + +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package rand + +import ( + "math" +) + +/* + * Exponential distribution + * + * See "The Ziggurat Method for Generating Random Variables" + * (Marsaglia & Tsang, 2000) + * http://www.jstatsoft.org/v05/i08/paper [pdf] + */ + +const ( + re = 7.69711747013104972 +) + +// ExpFloat64 returns an exponentially distributed float64 in the range +// (0, +math.MaxFloat64] with an exponential distribution whose rate parameter +// (lambda) is 1 and whose mean is 1/lambda (1). 
+// To produce a distribution with a different rate parameter, +// callers can adjust the output using: +// +// sample = ExpFloat64() / desiredRateParameter +func (r *Rand) ExpFloat64() float64 { + for { + j := r.Uint32() + i := j & 0xFF + x := float64(j) * float64(we[i]) + if j < ke[i] { + return x + } + if i == 0 { + return re - math.Log(r.Float64()) + } + if fe[i]+float32(r.Float64())*(fe[i-1]-fe[i]) < float32(math.Exp(-x)) { + return x + } + } +} + +var ke = [256]uint32{ + 0xe290a139, 0x0, 0x9beadebc, 0xc377ac71, 0xd4ddb990, + 0xde893fb8, 0xe4a8e87c, 0xe8dff16a, 0xebf2deab, 0xee49a6e8, + 0xf0204efd, 0xf19bdb8e, 0xf2d458bb, 0xf3da104b, 0xf4b86d78, + 0xf577ad8a, 0xf61de83d, 0xf6afb784, 0xf730a573, 0xf7a37651, + 0xf80a5bb6, 0xf867189d, 0xf8bb1b4f, 0xf9079062, 0xf94d70ca, + 0xf98d8c7d, 0xf9c8928a, 0xf9ff175b, 0xfa319996, 0xfa6085f8, + 0xfa8c3a62, 0xfab5084e, 0xfadb36c8, 0xfaff0410, 0xfb20a6ea, + 0xfb404fb4, 0xfb5e2951, 0xfb7a59e9, 0xfb95038c, 0xfbae44ba, + 0xfbc638d8, 0xfbdcf892, 0xfbf29a30, 0xfc0731df, 0xfc1ad1ed, + 0xfc2d8b02, 0xfc3f6c4d, 0xfc5083ac, 0xfc60ddd1, 0xfc708662, + 0xfc7f8810, 0xfc8decb4, 0xfc9bbd62, 0xfca9027c, 0xfcb5c3c3, + 0xfcc20864, 0xfccdd70a, 0xfcd935e3, 0xfce42ab0, 0xfceebace, + 0xfcf8eb3b, 0xfd02c0a0, 0xfd0c3f59, 0xfd156b7b, 0xfd1e48d6, + 0xfd26daff, 0xfd2f2552, 0xfd372af7, 0xfd3eeee5, 0xfd4673e7, + 0xfd4dbc9e, 0xfd54cb85, 0xfd5ba2f2, 0xfd62451b, 0xfd68b415, + 0xfd6ef1da, 0xfd750047, 0xfd7ae120, 0xfd809612, 0xfd8620b4, + 0xfd8b8285, 0xfd90bcf5, 0xfd95d15e, 0xfd9ac10b, 0xfd9f8d36, + 0xfda43708, 0xfda8bf9e, 0xfdad2806, 0xfdb17141, 0xfdb59c46, + 0xfdb9a9fd, 0xfdbd9b46, 0xfdc170f6, 0xfdc52bd8, 0xfdc8ccac, + 0xfdcc542d, 0xfdcfc30b, 0xfdd319ef, 0xfdd6597a, 0xfdd98245, + 0xfddc94e5, 0xfddf91e6, 0xfde279ce, 0xfde54d1f, 0xfde80c52, + 0xfdeab7de, 0xfded5034, 0xfdefd5be, 0xfdf248e3, 0xfdf4aa06, + 0xfdf6f984, 0xfdf937b6, 0xfdfb64f4, 0xfdfd818d, 0xfdff8dd0, + 0xfe018a08, 0xfe03767a, 0xfe05536c, 0xfe07211c, 0xfe08dfc9, + 0xfe0a8fab, 0xfe0c30fb, 0xfe0dc3ec, 0xfe0f48b1, 0xfe10bf76, + 0xfe122869, 0xfe1383b4, 0xfe14d17c, 0xfe1611e7, 0xfe174516, + 0xfe186b2a, 0xfe19843e, 0xfe1a9070, 0xfe1b8fd6, 0xfe1c8289, + 0xfe1d689b, 0xfe1e4220, 0xfe1f0f26, 0xfe1fcfbc, 0xfe2083ed, + 0xfe212bc3, 0xfe21c745, 0xfe225678, 0xfe22d95f, 0xfe234ffb, + 0xfe23ba4a, 0xfe241849, 0xfe2469f2, 0xfe24af3c, 0xfe24e81e, + 0xfe25148b, 0xfe253474, 0xfe2547c7, 0xfe254e70, 0xfe25485a, + 0xfe25356a, 0xfe251586, 0xfe24e88f, 0xfe24ae64, 0xfe2466e1, + 0xfe2411df, 0xfe23af34, 0xfe233eb4, 0xfe22c02c, 0xfe22336b, + 0xfe219838, 0xfe20ee58, 0xfe20358c, 0xfe1f6d92, 0xfe1e9621, + 0xfe1daef0, 0xfe1cb7ac, 0xfe1bb002, 0xfe1a9798, 0xfe196e0d, + 0xfe1832fd, 0xfe16e5fe, 0xfe15869d, 0xfe141464, 0xfe128ed3, + 0xfe10f565, 0xfe0f478c, 0xfe0d84b1, 0xfe0bac36, 0xfe09bd73, + 0xfe07b7b5, 0xfe059a40, 0xfe03644c, 0xfe011504, 0xfdfeab88, + 0xfdfc26e9, 0xfdf98629, 0xfdf6c83b, 0xfdf3ec01, 0xfdf0f04a, + 0xfdedd3d1, 0xfdea953d, 0xfde7331e, 0xfde3abe9, 0xfddffdfb, + 0xfddc2791, 0xfdd826cd, 0xfdd3f9a8, 0xfdcf9dfc, 0xfdcb1176, + 0xfdc65198, 0xfdc15bb3, 0xfdbc2ce2, 0xfdb6c206, 0xfdb117be, + 0xfdab2a63, 0xfda4f5fd, 0xfd9e7640, 0xfd97a67a, 0xfd908192, + 0xfd8901f2, 0xfd812182, 0xfd78d98e, 0xfd7022bb, 0xfd66f4ed, + 0xfd5d4732, 0xfd530f9c, 0xfd48432b, 0xfd3cd59a, 0xfd30b936, + 0xfd23dea4, 0xfd16349e, 0xfd07a7a3, 0xfcf8219b, 0xfce7895b, + 0xfcd5c220, 0xfcc2aadb, 0xfcae1d5e, 0xfc97ed4e, 0xfc7fe6d4, + 0xfc65ccf3, 0xfc495762, 0xfc2a2fc8, 0xfc07ee19, 0xfbe213c1, + 0xfbb8051a, 0xfb890078, 0xfb5411a5, 0xfb180005, 0xfad33482, + 0xfa839276, 0xfa263b32, 0xf9b72d1c, 
0xf930a1a2, 0xf889f023, + 0xf7b577d2, 0xf69c650c, 0xf51530f0, 0xf2cb0e3c, 0xeeefb15d, + 0xe6da6ecf, +} +var we = [256]float32{ + 2.0249555e-09, 1.486674e-11, 2.4409617e-11, 3.1968806e-11, + 3.844677e-11, 4.4228204e-11, 4.9516443e-11, 5.443359e-11, + 5.905944e-11, 6.344942e-11, 6.7643814e-11, 7.1672945e-11, + 7.556032e-11, 7.932458e-11, 8.298079e-11, 8.654132e-11, + 9.0016515e-11, 9.3415074e-11, 9.674443e-11, 1.0001099e-10, + 1.03220314e-10, 1.06377254e-10, 1.09486115e-10, 1.1255068e-10, + 1.1557435e-10, 1.1856015e-10, 1.2151083e-10, 1.2442886e-10, + 1.2731648e-10, 1.3017575e-10, 1.3300853e-10, 1.3581657e-10, + 1.3860142e-10, 1.4136457e-10, 1.4410738e-10, 1.4683108e-10, + 1.4953687e-10, 1.5222583e-10, 1.54899e-10, 1.5755733e-10, + 1.6020171e-10, 1.6283301e-10, 1.6545203e-10, 1.6805951e-10, + 1.7065617e-10, 1.732427e-10, 1.7581973e-10, 1.7838787e-10, + 1.8094774e-10, 1.8349985e-10, 1.8604476e-10, 1.8858298e-10, + 1.9111498e-10, 1.9364126e-10, 1.9616223e-10, 1.9867835e-10, + 2.0119004e-10, 2.0369768e-10, 2.0620168e-10, 2.087024e-10, + 2.1120022e-10, 2.136955e-10, 2.1618855e-10, 2.1867974e-10, + 2.2116936e-10, 2.2365775e-10, 2.261452e-10, 2.2863202e-10, + 2.311185e-10, 2.3360494e-10, 2.360916e-10, 2.3857874e-10, + 2.4106667e-10, 2.4355562e-10, 2.4604588e-10, 2.485377e-10, + 2.5103128e-10, 2.5352695e-10, 2.560249e-10, 2.585254e-10, + 2.6102867e-10, 2.6353494e-10, 2.6604446e-10, 2.6855745e-10, + 2.7107416e-10, 2.7359479e-10, 2.761196e-10, 2.7864877e-10, + 2.8118255e-10, 2.8372119e-10, 2.8626485e-10, 2.888138e-10, + 2.9136826e-10, 2.939284e-10, 2.9649452e-10, 2.9906677e-10, + 3.016454e-10, 3.0423064e-10, 3.0682268e-10, 3.0942177e-10, + 3.1202813e-10, 3.1464195e-10, 3.1726352e-10, 3.19893e-10, + 3.2253064e-10, 3.251767e-10, 3.2783135e-10, 3.3049485e-10, + 3.3316744e-10, 3.3584938e-10, 3.3854083e-10, 3.4124212e-10, + 3.4395342e-10, 3.46675e-10, 3.4940711e-10, 3.5215003e-10, + 3.5490397e-10, 3.5766917e-10, 3.6044595e-10, 3.6323455e-10, + 3.660352e-10, 3.6884823e-10, 3.7167386e-10, 3.745124e-10, + 3.773641e-10, 3.802293e-10, 3.8310827e-10, 3.860013e-10, + 3.8890866e-10, 3.918307e-10, 3.9476775e-10, 3.9772008e-10, + 4.0068804e-10, 4.0367196e-10, 4.0667217e-10, 4.09689e-10, + 4.1272286e-10, 4.1577405e-10, 4.1884296e-10, 4.2192994e-10, + 4.250354e-10, 4.281597e-10, 4.313033e-10, 4.3446652e-10, + 4.3764986e-10, 4.408537e-10, 4.4407847e-10, 4.4732465e-10, + 4.5059267e-10, 4.5388301e-10, 4.571962e-10, 4.6053267e-10, + 4.6389292e-10, 4.6727755e-10, 4.70687e-10, 4.741219e-10, + 4.7758275e-10, 4.810702e-10, 4.845848e-10, 4.8812715e-10, + 4.9169796e-10, 4.9529775e-10, 4.989273e-10, 5.0258725e-10, + 5.0627835e-10, 5.100013e-10, 5.1375687e-10, 5.1754584e-10, + 5.21369e-10, 5.2522725e-10, 5.2912136e-10, 5.330522e-10, + 5.370208e-10, 5.4102806e-10, 5.45075e-10, 5.491625e-10, + 5.532918e-10, 5.5746385e-10, 5.616799e-10, 5.6594107e-10, + 5.7024857e-10, 5.746037e-10, 5.7900773e-10, 5.834621e-10, + 5.8796823e-10, 5.925276e-10, 5.971417e-10, 6.018122e-10, + 6.065408e-10, 6.113292e-10, 6.1617933e-10, 6.2109295e-10, + 6.260722e-10, 6.3111916e-10, 6.3623595e-10, 6.4142497e-10, + 6.4668854e-10, 6.5202926e-10, 6.5744976e-10, 6.6295286e-10, + 6.6854156e-10, 6.742188e-10, 6.79988e-10, 6.858526e-10, + 6.9181616e-10, 6.978826e-10, 7.04056e-10, 7.103407e-10, + 7.167412e-10, 7.2326256e-10, 7.2990985e-10, 7.366886e-10, + 7.4360473e-10, 7.5066453e-10, 7.5787476e-10, 7.6524265e-10, + 7.7277595e-10, 7.80483e-10, 7.883728e-10, 7.9645507e-10, + 8.047402e-10, 8.1323964e-10, 8.219657e-10, 8.309319e-10, + 8.401528e-10, 8.496445e-10, 
8.594247e-10, 8.6951274e-10, + 8.799301e-10, 8.9070046e-10, 9.018503e-10, 9.134092e-10, + 9.254101e-10, 9.378904e-10, 9.508923e-10, 9.644638e-10, + 9.786603e-10, 9.935448e-10, 1.0091913e-09, 1.025686e-09, + 1.0431306e-09, 1.0616465e-09, 1.08138e-09, 1.1025096e-09, + 1.1252564e-09, 1.1498986e-09, 1.1767932e-09, 1.206409e-09, + 1.2393786e-09, 1.276585e-09, 1.3193139e-09, 1.3695435e-09, + 1.4305498e-09, 1.508365e-09, 1.6160854e-09, 1.7921248e-09, +} +var fe = [256]float32{ + 1, 0.9381437, 0.90046996, 0.87170434, 0.8477855, 0.8269933, + 0.8084217, 0.7915276, 0.77595687, 0.7614634, 0.7478686, + 0.7350381, 0.72286767, 0.71127474, 0.70019263, 0.6895665, + 0.67935055, 0.6695063, 0.66000086, 0.65080583, 0.6418967, + 0.63325197, 0.6248527, 0.6166822, 0.60872537, 0.60096896, + 0.5934009, 0.58601034, 0.5787874, 0.57172304, 0.5648092, + 0.5580383, 0.5514034, 0.5448982, 0.5385169, 0.53225386, + 0.5261042, 0.52006316, 0.5141264, 0.50828975, 0.5025495, + 0.496902, 0.49134386, 0.485872, 0.48048335, 0.4751752, + 0.46994483, 0.46478975, 0.45970762, 0.45469615, 0.44975325, + 0.44487688, 0.44006512, 0.43531612, 0.43062815, 0.42599955, + 0.42142874, 0.4169142, 0.41245446, 0.40804818, 0.403694, + 0.3993907, 0.39513698, 0.39093173, 0.38677382, 0.38266218, + 0.37859577, 0.37457356, 0.37059465, 0.3666581, 0.362763, + 0.35890847, 0.35509375, 0.351318, 0.3475805, 0.34388044, + 0.34021714, 0.3365899, 0.33299807, 0.32944095, 0.32591796, + 0.3224285, 0.3189719, 0.31554767, 0.31215525, 0.30879408, + 0.3054636, 0.3021634, 0.29889292, 0.2956517, 0.29243928, + 0.28925523, 0.28609908, 0.28297043, 0.27986884, 0.27679393, + 0.2737453, 0.2707226, 0.2677254, 0.26475343, 0.26180625, + 0.25888354, 0.25598502, 0.2531103, 0.25025907, 0.24743107, + 0.24462597, 0.24184346, 0.23908329, 0.23634516, 0.23362878, + 0.23093392, 0.2282603, 0.22560766, 0.22297576, 0.22036438, + 0.21777324, 0.21520215, 0.21265087, 0.21011916, 0.20760682, + 0.20511365, 0.20263945, 0.20018397, 0.19774707, 0.19532852, + 0.19292815, 0.19054577, 0.1881812, 0.18583426, 0.18350479, + 0.1811926, 0.17889754, 0.17661946, 0.17435817, 0.17211354, + 0.1698854, 0.16767362, 0.16547804, 0.16329853, 0.16113494, + 0.15898713, 0.15685499, 0.15473837, 0.15263714, 0.15055119, + 0.14848037, 0.14642459, 0.14438373, 0.14235765, 0.14034624, + 0.13834943, 0.13636707, 0.13439907, 0.13244532, 0.13050574, + 0.1285802, 0.12666863, 0.12477092, 0.12288698, 0.12101672, + 0.119160056, 0.1173169, 0.115487166, 0.11367077, 0.11186763, + 0.11007768, 0.10830083, 0.10653701, 0.10478614, 0.10304816, + 0.101323, 0.09961058, 0.09791085, 0.09622374, 0.09454919, + 0.09288713, 0.091237515, 0.08960028, 0.087975375, 0.08636274, + 0.08476233, 0.083174095, 0.081597984, 0.08003395, 0.07848195, + 0.076941945, 0.07541389, 0.07389775, 0.072393484, 0.07090106, + 0.069420435, 0.06795159, 0.066494495, 0.06504912, 0.063615434, + 0.062193416, 0.060783047, 0.059384305, 0.057997175, + 0.05662164, 0.05525769, 0.053905312, 0.052564494, 0.051235236, + 0.049917534, 0.048611384, 0.047316793, 0.046033762, 0.0447623, + 0.043502413, 0.042254124, 0.041017443, 0.039792392, + 0.038578995, 0.037377283, 0.036187284, 0.035009038, + 0.033842582, 0.032687962, 0.031545233, 0.030414443, 0.02929566, + 0.02818895, 0.027094385, 0.026012046, 0.024942026, 0.023884421, + 0.022839336, 0.021806888, 0.020787204, 0.019780423, 0.0187867, + 0.0178062, 0.016839107, 0.015885621, 0.014945968, 0.014020392, + 0.013109165, 0.012212592, 0.011331013, 0.01046481, 0.009614414, + 0.008780315, 0.007963077, 0.0071633533, 0.006381906, + 0.0056196423, 
0.0048776558, 0.004157295, 0.0034602648, + 0.0027887989, 0.0021459677, 0.0015362998, 0.0009672693, + 0.00045413437, +} diff --git a/vendor/go.mongodb.org/mongo-driver/internal/randutil/rand/normal.go b/vendor/go.mongodb.org/mongo-driver/internal/randutil/rand/normal.go new file mode 100644 index 00000000..8c74a358 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/internal/randutil/rand/normal.go @@ -0,0 +1,158 @@ +// Copied from https://cs.opensource.google/go/x/exp/+/24438e51023af3bfc1db8aed43c1342817e8cfcd:rand/normal.go + +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package rand + +import ( + "math" +) + +/* + * Normal distribution + * + * See "The Ziggurat Method for Generating Random Variables" + * (Marsaglia & Tsang, 2000) + * http://www.jstatsoft.org/v05/i08/paper [pdf] + */ + +const ( + rn = 3.442619855899 +) + +func absInt32(i int32) uint32 { + if i < 0 { + return uint32(-i) + } + return uint32(i) +} + +// NormFloat64 returns a normally distributed float64 in the range +// [-math.MaxFloat64, +math.MaxFloat64] with +// standard normal distribution (mean = 0, stddev = 1). +// To produce a different normal distribution, callers can +// adjust the output using: +// +// sample = NormFloat64() * desiredStdDev + desiredMean +func (r *Rand) NormFloat64() float64 { + for { + j := int32(r.Uint32()) // Possibly negative + i := j & 0x7F + x := float64(j) * float64(wn[i]) + if absInt32(j) < kn[i] { + // This case should be hit better than 99% of the time. + return x + } + + if i == 0 { + // This extra work is only required for the base strip. + for { + x = -math.Log(r.Float64()) * (1.0 / rn) + y := -math.Log(r.Float64()) + if y+y >= x*x { + break + } + } + if j > 0 { + return rn + x + } + return -rn - x + } + if fn[i]+float32(r.Float64())*(fn[i-1]-fn[i]) < float32(math.Exp(-.5*x*x)) { + return x + } + } +} + +var kn = [128]uint32{ + 0x76ad2212, 0x0, 0x600f1b53, 0x6ce447a6, 0x725b46a2, + 0x7560051d, 0x774921eb, 0x789a25bd, 0x799045c3, 0x7a4bce5d, + 0x7adf629f, 0x7b5682a6, 0x7bb8a8c6, 0x7c0ae722, 0x7c50cce7, + 0x7c8cec5b, 0x7cc12cd6, 0x7ceefed2, 0x7d177e0b, 0x7d3b8883, + 0x7d5bce6c, 0x7d78dd64, 0x7d932886, 0x7dab0e57, 0x7dc0dd30, + 0x7dd4d688, 0x7de73185, 0x7df81cea, 0x7e07c0a3, 0x7e163efa, + 0x7e23b587, 0x7e303dfd, 0x7e3beec2, 0x7e46db77, 0x7e51155d, + 0x7e5aabb3, 0x7e63abf7, 0x7e6c222c, 0x7e741906, 0x7e7b9a18, + 0x7e82adfa, 0x7e895c63, 0x7e8fac4b, 0x7e95a3fb, 0x7e9b4924, + 0x7ea0a0ef, 0x7ea5b00d, 0x7eaa7ac3, 0x7eaf04f3, 0x7eb3522a, + 0x7eb765a5, 0x7ebb4259, 0x7ebeeafd, 0x7ec2620a, 0x7ec5a9c4, + 0x7ec8c441, 0x7ecbb365, 0x7ece78ed, 0x7ed11671, 0x7ed38d62, + 0x7ed5df12, 0x7ed80cb4, 0x7eda175c, 0x7edc0005, 0x7eddc78e, + 0x7edf6ebf, 0x7ee0f647, 0x7ee25ebe, 0x7ee3a8a9, 0x7ee4d473, + 0x7ee5e276, 0x7ee6d2f5, 0x7ee7a620, 0x7ee85c10, 0x7ee8f4cd, + 0x7ee97047, 0x7ee9ce59, 0x7eea0eca, 0x7eea3147, 0x7eea3568, + 0x7eea1aab, 0x7ee9e071, 0x7ee98602, 0x7ee90a88, 0x7ee86d08, + 0x7ee7ac6a, 0x7ee6c769, 0x7ee5bc9c, 0x7ee48a67, 0x7ee32efc, + 0x7ee1a857, 0x7edff42f, 0x7ede0ffa, 0x7edbf8d9, 0x7ed9ab94, + 0x7ed7248d, 0x7ed45fae, 0x7ed1585c, 0x7ece095f, 0x7eca6ccb, + 0x7ec67be2, 0x7ec22eee, 0x7ebd7d1a, 0x7eb85c35, 0x7eb2c075, + 0x7eac9c20, 0x7ea5df27, 0x7e9e769f, 0x7e964c16, 0x7e8d44ba, + 0x7e834033, 0x7e781728, 0x7e6b9933, 0x7e5d8a1a, 0x7e4d9ded, + 0x7e3b737a, 0x7e268c2f, 0x7e0e3ff5, 0x7df1aa5d, 0x7dcf8c72, + 0x7da61a1e, 0x7d72a0fb, 0x7d30e097, 0x7cd9b4ab, 0x7c600f1a, + 0x7ba90bdc, 0x7a722176, 
0x77d664e5, +} +var wn = [128]float32{ + 1.7290405e-09, 1.2680929e-10, 1.6897518e-10, 1.9862688e-10, + 2.2232431e-10, 2.4244937e-10, 2.601613e-10, 2.7611988e-10, + 2.9073963e-10, 3.042997e-10, 3.1699796e-10, 3.289802e-10, + 3.4035738e-10, 3.5121603e-10, 3.616251e-10, 3.7164058e-10, + 3.8130857e-10, 3.9066758e-10, 3.9975012e-10, 4.08584e-10, + 4.1719309e-10, 4.2559822e-10, 4.338176e-10, 4.418672e-10, + 4.497613e-10, 4.5751258e-10, 4.651324e-10, 4.7263105e-10, + 4.8001775e-10, 4.87301e-10, 4.944885e-10, 5.015873e-10, + 5.0860405e-10, 5.155446e-10, 5.2241467e-10, 5.2921934e-10, + 5.359635e-10, 5.426517e-10, 5.4928817e-10, 5.5587696e-10, + 5.624219e-10, 5.6892646e-10, 5.753941e-10, 5.818282e-10, + 5.882317e-10, 5.946077e-10, 6.00959e-10, 6.072884e-10, + 6.135985e-10, 6.19892e-10, 6.2617134e-10, 6.3243905e-10, + 6.386974e-10, 6.449488e-10, 6.511956e-10, 6.5744005e-10, + 6.6368433e-10, 6.699307e-10, 6.7618144e-10, 6.824387e-10, + 6.8870465e-10, 6.949815e-10, 7.012715e-10, 7.075768e-10, + 7.1389966e-10, 7.202424e-10, 7.266073e-10, 7.329966e-10, + 7.394128e-10, 7.4585826e-10, 7.5233547e-10, 7.58847e-10, + 7.653954e-10, 7.719835e-10, 7.7861395e-10, 7.852897e-10, + 7.920138e-10, 7.987892e-10, 8.0561924e-10, 8.125073e-10, + 8.194569e-10, 8.2647167e-10, 8.3355556e-10, 8.407127e-10, + 8.479473e-10, 8.55264e-10, 8.6266755e-10, 8.7016316e-10, + 8.777562e-10, 8.8545243e-10, 8.932582e-10, 9.0117996e-10, + 9.09225e-10, 9.174008e-10, 9.2571584e-10, 9.341788e-10, + 9.427997e-10, 9.515889e-10, 9.605579e-10, 9.697193e-10, + 9.790869e-10, 9.88676e-10, 9.985036e-10, 1.0085882e-09, + 1.0189509e-09, 1.0296151e-09, 1.0406069e-09, 1.0519566e-09, + 1.063698e-09, 1.0758702e-09, 1.0885183e-09, 1.1016947e-09, + 1.1154611e-09, 1.1298902e-09, 1.1450696e-09, 1.1611052e-09, + 1.1781276e-09, 1.1962995e-09, 1.2158287e-09, 1.2369856e-09, + 1.2601323e-09, 1.2857697e-09, 1.3146202e-09, 1.347784e-09, + 1.3870636e-09, 1.4357403e-09, 1.5008659e-09, 1.6030948e-09, +} +var fn = [128]float32{ + 1, 0.9635997, 0.9362827, 0.9130436, 0.89228165, 0.87324303, + 0.8555006, 0.8387836, 0.8229072, 0.8077383, 0.793177, + 0.7791461, 0.7655842, 0.7524416, 0.73967725, 0.7272569, + 0.7151515, 0.7033361, 0.69178915, 0.68049186, 0.6694277, + 0.658582, 0.6479418, 0.63749546, 0.6272325, 0.6171434, + 0.6072195, 0.5974532, 0.58783704, 0.5783647, 0.56903, + 0.5598274, 0.5507518, 0.54179835, 0.5329627, 0.52424055, + 0.5156282, 0.50712204, 0.49871865, 0.49041483, 0.48220766, + 0.4740943, 0.46607214, 0.4581387, 0.45029163, 0.44252872, + 0.43484783, 0.427247, 0.41972435, 0.41227803, 0.40490642, + 0.39760786, 0.3903808, 0.3832238, 0.37613547, 0.36911446, + 0.3621595, 0.35526937, 0.34844297, 0.34167916, 0.33497685, + 0.3283351, 0.3217529, 0.3152294, 0.30876362, 0.30235484, + 0.29600215, 0.28970486, 0.2834622, 0.2772735, 0.27113807, + 0.2650553, 0.25902456, 0.2530453, 0.24711695, 0.241239, + 0.23541094, 0.22963232, 0.2239027, 0.21822165, 0.21258877, + 0.20700371, 0.20146611, 0.19597565, 0.19053204, 0.18513499, + 0.17978427, 0.17447963, 0.1692209, 0.16400786, 0.15884037, + 0.15371831, 0.14864157, 0.14361008, 0.13862377, 0.13368265, + 0.12878671, 0.12393598, 0.119130544, 0.11437051, 0.10965602, + 0.104987256, 0.10036444, 0.095787846, 0.0912578, 0.08677467, + 0.0823389, 0.077950984, 0.073611505, 0.06932112, 0.06508058, + 0.06089077, 0.056752663, 0.0526674, 0.048636295, 0.044660863, + 0.040742867, 0.03688439, 0.033087887, 0.029356318, + 0.025693292, 0.022103304, 0.018592102, 0.015167298, + 0.011839478, 0.008624485, 0.005548995, 0.0026696292, +} diff --git 
--git a/vendor/go.mongodb.org/mongo-driver/internal/randutil/rand/rand.go b/vendor/go.mongodb.org/mongo-driver/internal/randutil/rand/rand.go
new file mode 100644
index 00000000..ffd0509b
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/internal/randutil/rand/rand.go
@@ -0,0 +1,374 @@
+// Copied from https://cs.opensource.google/go/x/exp/+/24438e51023af3bfc1db8aed43c1342817e8cfcd:rand/rand.go
+
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package rand implements pseudo-random number generators.
+//
+// Random numbers are generated by a Source. Top-level functions, such as
+// Float64 and Int, use a default shared Source that produces a deterministic
+// sequence of values each time a program is run. Use the Seed function to
+// initialize the default Source if different behavior is required for each run.
+// The default Source, a LockedSource, is safe for concurrent use by multiple
+// goroutines, but Sources created by NewSource are not. However, Sources are small
+// and it is reasonable to have a separate Source for each goroutine, seeded
+// differently, to avoid locking.
+//
+// For random numbers suitable for security-sensitive work, see the crypto/rand
+// package.
+package rand
+
+import "sync"
+
+// A Source represents a source of uniformly-distributed
+// pseudo-random uint64 values in the range [0, 1<<64).
+type Source interface {
+    Uint64() uint64
+    Seed(seed uint64)
+}
+
+// NewSource returns a new pseudo-random Source seeded with the given value.
+func NewSource(seed uint64) Source {
+    var rng PCGSource
+    rng.Seed(seed)
+    return &rng
+}
+
+// A Rand is a source of random numbers.
+type Rand struct {
+    src Source
+
+    // readVal contains remainder of 64-bit integer used for bytes
+    // generation during most recent Read call.
+    // It is saved so next Read call can start where the previous
+    // one finished.
+    readVal uint64
+    // readPos indicates the number of low-order bytes of readVal
+    // that are still valid.
+    readPos int8
+}
+
+// New returns a new Rand that uses random values from src
+// to generate other random values.
+func New(src Source) *Rand {
+    return &Rand{src: src}
+}
+
+// Seed uses the provided seed value to initialize the generator to a deterministic state.
+// Seed should not be called concurrently with any other Rand method.
+func (r *Rand) Seed(seed uint64) {
+    if lk, ok := r.src.(*LockedSource); ok {
+        lk.seedPos(seed, &r.readPos)
+        return
+    }
+
+    r.src.Seed(seed)
+    r.readPos = 0
+}
+
+// Uint64 returns a pseudo-random 64-bit integer as a uint64.
+func (r *Rand) Uint64() uint64 { return r.src.Uint64() }
+
+// Int63 returns a non-negative pseudo-random 63-bit integer as an int64.
+func (r *Rand) Int63() int64 { return int64(r.src.Uint64() &^ (1 << 63)) }
+
+// Uint32 returns a pseudo-random 32-bit value as a uint32.
+func (r *Rand) Uint32() uint32 { return uint32(r.Uint64() >> 32) }
+
+// Int31 returns a non-negative pseudo-random 31-bit integer as an int32.
+func (r *Rand) Int31() int32 { return int32(r.Uint64() >> 33) }
+
+// Int returns a non-negative pseudo-random int.
+func (r *Rand) Int() int {
+    u := uint(r.Uint64())
+    return int(u << 1 >> 1) // clear sign bit.
+}
+
+const maxUint64 = (1 << 64) - 1
+
+// Uint64n returns, as a uint64, a pseudo-random number in [0,n).
+// It is guaranteed more uniform than taking a Source value mod n
+// for any n that is not a power of 2.
+func (r *Rand) Uint64n(n uint64) uint64 {
+    if n&(n-1) == 0 { // n is power of two, can mask
+        if n == 0 {
+            panic("invalid argument to Uint64n")
+        }
+        return r.Uint64() & (n - 1)
+    }
+    // If n does not divide v, to avoid bias we must not use
+    // a v that is within maxUint64%n of the top of the range.
+    v := r.Uint64()
+    if v > maxUint64-n { // Fast check.
+        ceiling := maxUint64 - maxUint64%n
+        for v >= ceiling {
+            v = r.Uint64()
+        }
+    }
+
+    return v % n
+}
+
+// Int63n returns, as an int64, a non-negative pseudo-random number in [0,n).
+// It panics if n <= 0.
+func (r *Rand) Int63n(n int64) int64 {
+    if n <= 0 {
+        panic("invalid argument to Int63n")
+    }
+    return int64(r.Uint64n(uint64(n)))
+}
+
+// Int31n returns, as an int32, a non-negative pseudo-random number in [0,n).
+// It panics if n <= 0.
+func (r *Rand) Int31n(n int32) int32 {
+    if n <= 0 {
+        panic("invalid argument to Int31n")
+    }
+    // TODO: Avoid some 64-bit ops to make it more efficient on 32-bit machines.
+    return int32(r.Uint64n(uint64(n)))
+}
+
+// Intn returns, as an int, a non-negative pseudo-random number in [0,n).
+// It panics if n <= 0.
+func (r *Rand) Intn(n int) int {
+    if n <= 0 {
+        panic("invalid argument to Intn")
+    }
+    // TODO: Avoid some 64-bit ops to make it more efficient on 32-bit machines.
+    return int(r.Uint64n(uint64(n)))
+}
+
+// Float64 returns, as a float64, a pseudo-random number in [0.0,1.0).
+func (r *Rand) Float64() float64 {
+    // There is one bug in the value stream: r.Int63() may be so close
+    // to 1<<63 that the division rounds up to 1.0, and we've guaranteed
+    // that the result is always less than 1.0.
+    //
+    // We tried to fix this by mapping 1.0 back to 0.0, but since float64
+    // values near 0 are much denser than near 1, mapping 1 to 0 caused
+    // a theoretically significant overshoot in the probability of returning 0.
+    // Instead of that, if we round up to 1, just try again.
+    // Getting 1 only happens 1/2⁵³ of the time, so most clients
+    // will not observe it anyway.
+again:
+    f := float64(r.Uint64n(1<<53)) / (1 << 53)
+    if f == 1.0 {
+        goto again // resample; this branch is taken O(never)
+    }
+    return f
+}
+
+// Float32 returns, as a float32, a pseudo-random number in [0.0,1.0).
+func (r *Rand) Float32() float32 {
+    // We do not want to return 1.0.
+    // This only happens 1/2²⁴ of the time (plus the 1/2⁵³ of the time in Float64).
+again:
+    f := float32(r.Float64())
+    if f == 1 {
+        goto again // resample; this branch is taken O(very rarely)
+    }
+    return f
+}
+
+// Perm returns, as a slice of n ints, a pseudo-random permutation of the integers [0,n).
+func (r *Rand) Perm(n int) []int {
+    m := make([]int, n)
+    // In the following loop, the iteration when i=0 always swaps m[0] with m[0].
+    // A change to remove this useless iteration is to assign 1 to i in the init
+    // statement. But Perm also affects r. Making this change will affect
+    // the final state of r. So this change can't be made for compatibility
+    // reasons for Go 1.
+    for i := 0; i < n; i++ {
+        j := r.Intn(i + 1)
+        m[i] = m[j]
+        m[j] = i
+    }
+    return m
+}
+
+// Shuffle pseudo-randomizes the order of elements.
+// n is the number of elements. Shuffle panics if n < 0.
+// swap swaps the elements with indexes i and j.
+func (r *Rand) Shuffle(n int, swap func(i, j int)) {
+    if n < 0 {
+        panic("invalid argument to Shuffle")
+    }
+
+    // Fisher-Yates shuffle: https://en.wikipedia.org/wiki/Fisher%E2%80%93Yates_shuffle
+    // Shuffle really ought not be called with n that doesn't fit in 32 bits.
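The rejection loop in Uint64n above is what prevents modulo bias. A toy illustration of the bias a plain "% n" reduction would introduce, here with a uniform two-bit source and n = 3:

    package main

    import "fmt"

    func main() {
        counts := map[uint64]int{}
        for v := uint64(0); v < 4; v++ { // a uniform source over {0, 1, 2, 3}
            counts[v%3]++ // naive reduction folds 3 back onto 0
        }
        fmt.Println(counts) // map[0:2 1:1 2:1]: 0 is twice as likely as 1 or 2
    }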
+ // Not only will it take a very long time, but with 2³¹! possible permutations, + // there's no way that any PRNG can have a big enough internal state to + // generate even a minuscule percentage of the possible permutations. + // Nevertheless, the right API signature accepts an int n, so handle it as best we can. + i := n - 1 + for ; i > 1<<31-1-1; i-- { + j := int(r.Int63n(int64(i + 1))) + swap(i, j) + } + for ; i > 0; i-- { + j := int(r.Int31n(int32(i + 1))) + swap(i, j) + } +} + +// Read generates len(p) random bytes and writes them into p. It +// always returns len(p) and a nil error. +// Read should not be called concurrently with any other Rand method unless +// the underlying source is a LockedSource. +func (r *Rand) Read(p []byte) (n int, err error) { + if lk, ok := r.src.(*LockedSource); ok { + return lk.Read(p, &r.readVal, &r.readPos) + } + return read(p, r.src, &r.readVal, &r.readPos) +} + +func read(p []byte, src Source, readVal *uint64, readPos *int8) (n int, err error) { + pos := *readPos + val := *readVal + rng, _ := src.(*PCGSource) + for n = 0; n < len(p); n++ { + if pos == 0 { + if rng != nil { + val = rng.Uint64() + } else { + val = src.Uint64() + } + pos = 8 + } + p[n] = byte(val) + val >>= 8 + pos-- + } + *readPos = pos + *readVal = val + return +} + +/* + * Top-level convenience functions + */ + +var globalRand = New(&LockedSource{src: *NewSource(1).(*PCGSource)}) + +// Type assert that globalRand's source is a LockedSource whose src is a PCGSource. +var _ PCGSource = globalRand.src.(*LockedSource).src + +// Seed uses the provided seed value to initialize the default Source to a +// deterministic state. If Seed is not called, the generator behaves as +// if seeded by Seed(1). +// Seed, unlike the Rand.Seed method, is safe for concurrent use. +func Seed(seed uint64) { globalRand.Seed(seed) } + +// Int63 returns a non-negative pseudo-random 63-bit integer as an int64 +// from the default Source. +func Int63() int64 { return globalRand.Int63() } + +// Uint32 returns a pseudo-random 32-bit value as a uint32 +// from the default Source. +func Uint32() uint32 { return globalRand.Uint32() } + +// Uint64 returns a pseudo-random 64-bit value as a uint64 +// from the default Source. +func Uint64() uint64 { return globalRand.Uint64() } + +// Int31 returns a non-negative pseudo-random 31-bit integer as an int32 +// from the default Source. +func Int31() int32 { return globalRand.Int31() } + +// Int returns a non-negative pseudo-random int from the default Source. +func Int() int { return globalRand.Int() } + +// Int63n returns, as an int64, a non-negative pseudo-random number in [0,n) +// from the default Source. +// It panics if n <= 0. +func Int63n(n int64) int64 { return globalRand.Int63n(n) } + +// Int31n returns, as an int32, a non-negative pseudo-random number in [0,n) +// from the default Source. +// It panics if n <= 0. +func Int31n(n int32) int32 { return globalRand.Int31n(n) } + +// Intn returns, as an int, a non-negative pseudo-random number in [0,n) +// from the default Source. +// It panics if n <= 0. +func Intn(n int) int { return globalRand.Intn(n) } + +// Float64 returns, as a float64, a pseudo-random number in [0.0,1.0) +// from the default Source. +func Float64() float64 { return globalRand.Float64() } + +// Float32 returns, as a float32, a pseudo-random number in [0.0,1.0) +// from the default Source. 
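As the package documentation above notes, equal seeds yield identical streams, and the default Source is shared and locked. A sketch against this vendored API (the import path is driver-internal, so this is illustrative only):

    r1 := rand.New(rand.NewSource(42))
    r2 := rand.New(rand.NewSource(42))
    _ = r1.Uint64() == r2.Uint64() // always true: equal seeds, equal streams

    rand.Seed(7) // reseeds the shared LockedSource; safe for concurrent use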
+func Float32() float32 { return globalRand.Float32() }
+
+// Perm returns, as a slice of n ints, a pseudo-random permutation of the integers [0,n)
+// from the default Source.
+func Perm(n int) []int { return globalRand.Perm(n) }
+
+// Shuffle pseudo-randomizes the order of elements using the default Source.
+// n is the number of elements. Shuffle panics if n < 0.
+// swap swaps the elements with indexes i and j.
+func Shuffle(n int, swap func(i, j int)) { globalRand.Shuffle(n, swap) }
+
+// Read generates len(p) random bytes from the default Source and
+// writes them into p. It always returns len(p) and a nil error.
+// Read, unlike the Rand.Read method, is safe for concurrent use.
+func Read(p []byte) (n int, err error) { return globalRand.Read(p) }
+
+// NormFloat64 returns a normally distributed float64 in the range
+// [-math.MaxFloat64, +math.MaxFloat64] with
+// standard normal distribution (mean = 0, stddev = 1)
+// from the default Source.
+// To produce a different normal distribution, callers can
+// adjust the output using:
+//
+//  sample = NormFloat64() * desiredStdDev + desiredMean
+func NormFloat64() float64 { return globalRand.NormFloat64() }
+
+// ExpFloat64 returns an exponentially distributed float64 in the range
+// (0, +math.MaxFloat64] with an exponential distribution whose rate parameter
+// (lambda) is 1 and whose mean is 1/lambda (1) from the default Source.
+// To produce a distribution with a different rate parameter,
+// callers can adjust the output using:
+//
+//  sample = ExpFloat64() / desiredRateParameter
+func ExpFloat64() float64 { return globalRand.ExpFloat64() }
+
+// LockedSource is an implementation of Source that is concurrency-safe.
+// A Rand using a LockedSource is safe for concurrent use.
+//
+// The zero value of LockedSource is valid, but should be seeded before use.
+type LockedSource struct {
+    lk  sync.Mutex
+    src PCGSource
+}
+
+func (s *LockedSource) Uint64() (n uint64) {
+    s.lk.Lock()
+    n = s.src.Uint64()
+    s.lk.Unlock()
+    return
+}
+
+func (s *LockedSource) Seed(seed uint64) {
+    s.lk.Lock()
+    s.src.Seed(seed)
+    s.lk.Unlock()
+}
+
+// seedPos implements Seed for a LockedSource without a race condition.
+func (s *LockedSource) seedPos(seed uint64, readPos *int8) {
+    s.lk.Lock()
+    s.src.Seed(seed)
+    *readPos = 0
+    s.lk.Unlock()
+}
+
+// Read implements Read for a LockedSource.
+func (s *LockedSource) Read(p []byte, readVal *uint64, readPos *int8) (n int, err error) {
+    s.lk.Lock()
+    n, err = read(p, &s.src, readVal, readPos)
+    s.lk.Unlock()
+    return
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/internal/randutil/rand/rng.go b/vendor/go.mongodb.org/mongo-driver/internal/randutil/rand/rng.go
new file mode 100644
index 00000000..f04f9879
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/internal/randutil/rand/rng.go
@@ -0,0 +1,93 @@
+// Copied from https://cs.opensource.google/go/x/exp/+/24438e51023af3bfc1db8aed43c1342817e8cfcd:rand/rng.go
+
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package rand
+
+import (
+    "encoding/binary"
+    "io"
+    "math/bits"
+)
+
+// PCGSource is an implementation of a 64-bit permuted congruential
+// generator as defined in
+//
+//  PCG: A Family of Simple Fast Space-Efficient Statistically Good
+//  Algorithms for Random Number Generation
+//  Melissa E. O’Neill, Harvey Mudd College
+//  http://www.pcg-random.org/pdf/toms-oneill-pcg-family-v1.02.pdf
+//
+// The generator here is the congruential generator PCG XSL RR 128/64 (LCG)
+// as found in the software available at http://www.pcg-random.org/.
+// It has period 2^128 with 128 bits of state, producing 64-bit values.
+// Its state is represented by two uint64 words.
+type PCGSource struct {
+    low  uint64
+    high uint64
+}
+
+const (
+    maxUint32 = (1 << 32) - 1
+
+    multiplier = 47026247687942121848144207491837523525
+    mulHigh    = multiplier >> 64
+    mulLow     = multiplier & maxUint64
+
+    increment = 117397592171526113268558934119004209487
+    incHigh   = increment >> 64
+    incLow    = increment & maxUint64
+
+    // TODO: Use these?
+    initializer = 245720598905631564143578724636268694099
+    initHigh    = initializer >> 64
+    initLow     = initializer & maxUint64
+)
+
+// Seed uses the provided seed value to initialize the generator to a deterministic state.
+func (pcg *PCGSource) Seed(seed uint64) {
+    pcg.low = seed
+    pcg.high = seed // TODO: What is right?
+}
+
+// Uint64 returns a pseudo-random 64-bit unsigned integer as a uint64.
+func (pcg *PCGSource) Uint64() uint64 {
+    pcg.multiply()
+    pcg.add()
+    // XOR high and low 64 bits together and rotate right by high 6 bits of state.
+    return bits.RotateLeft64(pcg.high^pcg.low, -int(pcg.high>>58))
+}
+
+func (pcg *PCGSource) add() {
+    var carry uint64
+    pcg.low, carry = Add64(pcg.low, incLow, 0)
+    pcg.high, _ = Add64(pcg.high, incHigh, carry)
+}
+
+func (pcg *PCGSource) multiply() {
+    hi, lo := Mul64(pcg.low, mulLow)
+    hi += pcg.high * mulLow
+    hi += pcg.low * mulHigh
+    pcg.low = lo
+    pcg.high = hi
+}
+
+// MarshalBinary returns the binary representation of the current state of the generator.
+func (pcg *PCGSource) MarshalBinary() ([]byte, error) {
+    var buf [16]byte
+    binary.BigEndian.PutUint64(buf[:8], pcg.high)
+    binary.BigEndian.PutUint64(buf[8:], pcg.low)
+    return buf[:], nil
+}
+
+// UnmarshalBinary sets the state of the generator to the state represented in data.
+func (pcg *PCGSource) UnmarshalBinary(data []byte) error {
+    if len(data) < 16 {
+        return io.ErrUnexpectedEOF
+    }
+    pcg.low = binary.BigEndian.Uint64(data[8:])
+    pcg.high = binary.BigEndian.Uint64(data[:8])
+    return nil
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/internal/randutil/randutil.go b/vendor/go.mongodb.org/mongo-driver/internal/randutil/randutil.go
index d7b753b7..96160743 100644
--- a/vendor/go.mongodb.org/mongo-driver/internal/randutil/randutil.go
+++ b/vendor/go.mongodb.org/mongo-driver/internal/randutil/randutil.go
@@ -11,67 +11,29 @@ import (
 	crand "crypto/rand"
 	"fmt"
 	"io"
-	"math/rand"
-	"sync"
-)
-
-// A LockedRand wraps a "math/rand".Rand and is safe to use from multiple goroutines.
-type LockedRand struct {
-	mu sync.Mutex
-	r  *rand.Rand
-}
-
-// NewLockedRand returns a new LockedRand that uses random values from src to generate other random
-// values. It is safe to use from multiple goroutines.
-func NewLockedRand(src rand.Source) *LockedRand {
-	return &LockedRand{
-		// Ignore gosec warning "Use of weak random number generator (math/rand instead of
-		// crypto/rand)". We intentionally use a pseudo-random number generator.
-		/* #nosec G404 */
-		r: rand.New(src),
-	}
-}
-
-// Read generates len(p) random bytes and writes them into p. It always returns len(p) and a nil
-// error.
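MarshalBinary and UnmarshalBinary above make the PCG state portable as 16 big-endian bytes (high word first). A round-trip sketch using only the vendored API:

    var src rand.PCGSource
    src.Seed(1)

    state, _ := src.MarshalBinary() // never returns an error
    want := src.Uint64()

    var clone rand.PCGSource
    if err := clone.UnmarshalBinary(state); err == nil {
        _ = clone.Uint64() == want // true: the restored state replays the stream
    }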
-func (lr *LockedRand) Read(p []byte) (int, error) {
-	lr.mu.Lock()
-	n, err := lr.r.Read(p)
-	lr.mu.Unlock()
-	return n, err
-}
-
-// Intn returns, as an int, a non-negative pseudo-random number in the half-open interval [0,n). It
-// panics if n <= 0.
-func (lr *LockedRand) Intn(n int) int {
-	lr.mu.Lock()
-	x := lr.r.Intn(n)
-	lr.mu.Unlock()
-	return x
-}
+	xrand "go.mongodb.org/mongo-driver/internal/randutil/rand"
+)
-
-// Shuffle pseudo-randomizes the order of elements. n is the number of elements. Shuffle panics if
-// n < 0. swap swaps the elements with indexes i and j.
-//
-// Note that Shuffle locks the LockedRand, so shuffling large collections may adversely affect other
-// concurrent calls. If many concurrent Shuffle and random value calls are required, consider using
-// the global "math/rand".Shuffle instead because it uses much more granular locking.
-func (lr *LockedRand) Shuffle(n int, swap func(i, j int)) {
-	lr.mu.Lock()
-	lr.r.Shuffle(n, swap)
-	lr.mu.Unlock()
+// NewLockedRand returns a new "x/exp/rand" pseudo-random number generator seeded with a
+// cryptographically-secure random number.
+// It is safe to use from multiple goroutines.
+func NewLockedRand() *xrand.Rand {
+	var randSrc = new(xrand.LockedSource)
+	randSrc.Seed(cryptoSeed())
+	return xrand.New(randSrc)
 }
 
-// CryptoSeed returns a random int64 read from the "crypto/rand" random number generator. It is
+// cryptoSeed returns a random uint64 read from the "crypto/rand" random number generator. It is
 // intended to be used to seed pseudorandom number generators at package initialization. It panics
 // if it encounters any errors.
-func CryptoSeed() int64 {
+func cryptoSeed() uint64 {
 	var b [8]byte
 	_, err := io.ReadFull(crand.Reader, b[:])
 	if err != nil {
 		panic(fmt.Errorf("failed to read 8 bytes from a \"crypto/rand\".Reader: %v", err))
 	}
-	return (int64(b[0]) << 0) | (int64(b[1]) << 8) | (int64(b[2]) << 16) | (int64(b[3]) << 24) |
-		(int64(b[4]) << 32) | (int64(b[5]) << 40) | (int64(b[6]) << 48) | (int64(b[7]) << 56)
+	return (uint64(b[0]) << 0) | (uint64(b[1]) << 8) | (uint64(b[2]) << 16) | (uint64(b[3]) << 24) |
+		(uint64(b[4]) << 32) | (uint64(b[5]) << 40) | (uint64(b[6]) << 48) | (uint64(b[7]) << 56)
 }
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/uuid/uuid.go b/vendor/go.mongodb.org/mongo-driver/internal/uuid/uuid.go
similarity index 54%
rename from vendor/go.mongodb.org/mongo-driver/x/mongo/driver/uuid/uuid.go
rename to vendor/go.mongodb.org/mongo-driver/internal/uuid/uuid.go
index 09783873..78f16645 100644
--- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/uuid/uuid.go
+++ b/vendor/go.mongodb.org/mongo-driver/internal/uuid/uuid.go
@@ -4,11 +4,10 @@
 // not use this file except in compliance with the License. You may obtain
 // a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
 
-package uuid // import "go.mongodb.org/mongo-driver/x/mongo/driver/uuid"
+package uuid
 
 import (
 	"io"
-	"math/rand"
 
 	"go.mongodb.org/mongo-driver/internal/randutil"
 )
@@ -16,47 +15,39 @@ import (
 // UUID represents a UUID.
 type UUID [16]byte
 
-// A source is a UUID generator that reads random values from a randutil.LockedRand.
-// It is safe to use from multiple goroutines.
+// A source is a UUID generator that reads random values from an io.Reader.
+// It should be safe to use from multiple goroutines.
 type source struct {
-	random *randutil.LockedRand
+	random io.Reader
 }
 
 // new returns a random UUIDv4 with bytes read from the source's random number generator.
 func (s *source) new() (UUID, error) {
-	var uuid [16]byte
-
+	var uuid UUID
 	_, err := io.ReadFull(s.random, uuid[:])
 	if err != nil {
-		return [16]byte{}, err
+		return UUID{}, err
 	}
 	uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4
 	uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10
-
 	return uuid, nil
 }
 
-// newGlobalSource returns a source that uses a "math/rand" pseudo-random number generator seeded
-// with a cryptographically-secure random number. It is intended to be used to initialize the
-// package-global UUID generator.
-func newGlobalSource() *source {
+// newSource returns a source that uses a pseudo-random number generator in the randutil package.
+// It is intended to be used to initialize the package-global UUID generator.
+func newSource() *source {
 	return &source{
-		random: randutil.NewLockedRand(rand.NewSource(randutil.CryptoSeed())),
+		random: randutil.NewLockedRand(),
 	}
 }
 
 // globalSource is a package-global pseudo-random UUID generator.
-var globalSource = newGlobalSource()
+var globalSource = newSource()
 
-// New returns a random UUIDv4. It uses a "math/rand" pseudo-random number generator seeded with a
-// cryptographically-secure random number at package initialization.
+// New returns a random UUIDv4. It uses a global pseudo-random number generator in randutil,
+// seeded at package initialization.
 //
 // New should not be used to generate cryptographically-secure random UUIDs.
 func New() (UUID, error) {
 	return globalSource.new()
 }
-
-// Equal returns true if two UUIDs are equal.
-func Equal(a, b UUID) bool {
-	return a == b
-}
diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/batch_cursor.go b/vendor/go.mongodb.org/mongo-driver/mongo/batch_cursor.go
index 0b7432f4..966e43cd 100644
--- a/vendor/go.mongodb.org/mongo-driver/mongo/batch_cursor.go
+++ b/vendor/go.mongodb.org/mongo-driver/mongo/batch_cursor.go
@@ -1,3 +1,9 @@
+// Copyright (C) MongoDB, Inc. 2022-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
 package mongo
 
 import (
diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/bulk_write.go b/vendor/go.mongodb.org/mongo-driver/mongo/bulk_write.go
index e748ced6..2c58f222 100644
--- a/vendor/go.mongodb.org/mongo-driver/mongo/bulk_write.go
+++ b/vendor/go.mongodb.org/mongo-driver/mongo/bulk_write.go
@@ -27,6 +27,7 @@ type bulkWriteBatch struct {
 
 // bulkWrite performs a bulk write operation
 type bulkWrite struct {
+	comment                  interface{}
 	ordered                  *bool
 	bypassDocumentValidation *bool
 	models                   []WriteModel
@@ -178,7 +179,14 @@ func (bw *bulkWrite) runInsert(ctx context.Context, batch bulkWriteBatch) (opera
 		ServerSelector(bw.selector).ClusterClock(bw.collection.client.clock).
 		Database(bw.collection.db.name).Collection(bw.collection.name).
 		Deployment(bw.collection.client.deployment).Crypt(bw.collection.client.cryptFLE).
- ServerAPI(bw.collection.client.serverAPI) + ServerAPI(bw.collection.client.serverAPI).Timeout(bw.collection.client.timeout) + if bw.comment != nil { + comment, err := transformValue(bw.collection.registry, bw.comment, true, "comment") + if err != nil { + return op.Result(), err + } + op.Comment(comment) + } if bw.bypassDocumentValidation != nil && *bw.bypassDocumentValidation { op = op.BypassDocumentValidation(*bw.bypassDocumentValidation) } @@ -228,7 +236,14 @@ func (bw *bulkWrite) runDelete(ctx context.Context, batch bulkWriteBatch) (opera ServerSelector(bw.selector).ClusterClock(bw.collection.client.clock). Database(bw.collection.db.name).Collection(bw.collection.name). Deployment(bw.collection.client.deployment).Crypt(bw.collection.client.cryptFLE).Hint(hasHint). - ServerAPI(bw.collection.client.serverAPI) + ServerAPI(bw.collection.client.serverAPI).Timeout(bw.collection.client.timeout) + if bw.comment != nil { + comment, err := transformValue(bw.collection.registry, bw.comment, true, "comment") + if err != nil { + return op.Result(), err + } + op.Comment(comment) + } if bw.let != nil { let, err := transformBsoncoreDocument(bw.collection.registry, bw.let, true, "let") if err != nil { @@ -316,7 +331,14 @@ func (bw *bulkWrite) runUpdate(ctx context.Context, batch bulkWriteBatch) (opera ServerSelector(bw.selector).ClusterClock(bw.collection.client.clock). Database(bw.collection.db.name).Collection(bw.collection.name). Deployment(bw.collection.client.deployment).Crypt(bw.collection.client.cryptFLE).Hint(hasHint). - ArrayFilters(hasArrayFilters).ServerAPI(bw.collection.client.serverAPI) + ArrayFilters(hasArrayFilters).ServerAPI(bw.collection.client.serverAPI).Timeout(bw.collection.client.timeout) + if bw.comment != nil { + comment, err := transformValue(bw.collection.registry, bw.comment, true, "comment") + if err != nil { + return op.Result(), err + } + op.Comment(comment) + } if bw.let != nil { let, err := transformBsoncoreDocument(bw.collection.registry, bw.let, true, "let") if err != nil { @@ -395,7 +417,6 @@ func createUpdateDoc( } updateDoc, _ = bsoncore.AppendDocumentEnd(updateDoc, uidx) - return updateDoc, nil } diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/bulk_write_models.go b/vendor/go.mongodb.org/mongo-driver/mongo/bulk_write_models.go index b4b8e3ef..64f45891 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/bulk_write_models.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/bulk_write_models.go @@ -152,7 +152,7 @@ func (rom *ReplaceOneModel) SetFilter(filter interface{}) *ReplaceOneModel { } // SetReplacement specifies a document that will be used to replace the selected document. It cannot be nil and cannot -// contain any update operators (https://docs.mongodb.com/manual/reference/operator/update/). +// contain any update operators (https://www.mongodb.com/docs/manual/reference/operator/update/). func (rom *ReplaceOneModel) SetReplacement(rep interface{}) *ReplaceOneModel { rom.Replacement = rep return rom @@ -210,7 +210,7 @@ func (uom *UpdateOneModel) SetFilter(filter interface{}) *UpdateOneModel { } // SetUpdate specifies the modifications to be made to the selected document. The value must be a document containing -// update operators (https://docs.mongodb.com/manual/reference/operator/update/). It cannot be nil or empty. +// update operators (https://www.mongodb.com/docs/manual/reference/operator/update/). It cannot be nil or empty. 
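The comment field threaded through runInsert, runDelete, and runUpdate above comes from the public bulk write options; a hedged caller-side sketch (coll is an existing *mongo.Collection, and the value may be any BSON-marshalable type):

    models := []mongo.WriteModel{
        mongo.NewInsertOneModel().SetDocument(bson.D{{Key: "status", Value: "A"}}),
    }
    opts := options.BulkWrite().SetComment("audit-2022-07") // attached to each command in the batch
    result, err := coll.BulkWrite(context.TODO(), models, opts)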
func (uom *UpdateOneModel) SetUpdate(update interface{}) *UpdateOneModel { uom.Update = update return uom @@ -274,7 +274,7 @@ func (umm *UpdateManyModel) SetFilter(filter interface{}) *UpdateManyModel { } // SetUpdate specifies the modifications to be made to the selected documents. The value must be a document containing -// update operators (https://docs.mongodb.com/manual/reference/operator/update/). It cannot be nil or empty. +// update operators (https://www.mongodb.com/docs/manual/reference/operator/update/). It cannot be nil or empty. func (umm *UpdateManyModel) SetUpdate(update interface{}) *UpdateManyModel { umm.Update = update return umm diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/change_stream.go b/vendor/go.mongodb.org/mongo-driver/mongo/change_stream.go index a76eb7c9..c809002a 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/change_stream.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/change_stream.go @@ -17,6 +17,7 @@ import ( "go.mongodb.org/mongo-driver/bson" "go.mongodb.org/mongo-driver/bson/bsoncodec" "go.mongodb.org/mongo-driver/bson/primitive" + "go.mongodb.org/mongo-driver/internal" "go.mongodb.org/mongo-driver/mongo/description" "go.mongodb.org/mongo-driver/mongo/options" "go.mongodb.org/mongo-driver/mongo/readconcern" @@ -63,7 +64,7 @@ var ( // ChangeStream is used to iterate over a stream of events. Each event can be decoded into a Go type via the Decode // method or accessed as raw BSON via the Current field. This type is not goroutine safe and must not be used // concurrently by multiple goroutines. For more information about change streams, see -// https://docs.mongodb.com/manual/changeStreams/. +// https://www.mongodb.com/docs/manual/changeStreams/. type ChangeStream struct { // Current is the BSON bytes of the current event. This property is only valid until the next call to Next or // TryNext. If continued access is required, a copy must be made. @@ -132,11 +133,20 @@ func newChangeStream(ctx context.Context, config changeStreamConfig, pipeline in ReadPreference(config.readPreference).ReadConcern(config.readConcern). Deployment(cs.client.deployment).ClusterClock(cs.client.clock). CommandMonitor(cs.client.monitor).Session(cs.sess).ServerSelector(cs.selector).Retry(driver.RetryNone). - ServerAPI(cs.client.serverAPI).Crypt(config.crypt) + ServerAPI(cs.client.serverAPI).Crypt(config.crypt).Timeout(cs.client.timeout) if cs.options.Collation != nil { cs.aggregate.Collation(bsoncore.Document(cs.options.Collation.ToDocument())) } + if comment := cs.options.Comment; comment != nil { + cs.aggregate.Comment(*comment) + + commentVal, err := transformValue(cs.registry, comment, true, "comment") + if err != nil { + return nil, err + } + cs.cursorOptions.Comment = commentVal + } if cs.options.BatchSize != nil { cs.aggregate.BatchSize(*cs.options.BatchSize) cs.cursorOptions.BatchSize = *cs.options.BatchSize @@ -261,6 +271,16 @@ func (cs *ChangeStream) executeOperation(ctx context.Context, resuming bool) err cs.aggregate.Pipeline(plArr) } + // If no deadline is set on the passed-in context, cs.client.timeout is set, and context is not already + // a Timeout context, honor cs.client.timeout in new Timeout context for change stream operation execution + // and potential retry. + if _, deadlineSet := ctx.Deadline(); !deadlineSet && cs.client.timeout != nil && !internal.IsTimeoutContext(ctx) { + newCtx, cancelFunc := internal.MakeTimeoutContext(ctx, *cs.client.timeout) + // Redefine ctx to be the new timeout-derived context. 
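A sketch of what the executeOperation change above enables: when the Client was created with options.Client().SetTimeout(...), each change stream operation and its retries run under a derived CSOT context unless the caller already supplied a deadline. The comment option is the one copied into cursorOptions above; UpdateLookup is shown only as an example value:

    func watch(ctx context.Context, coll *mongo.Collection) (*mongo.ChangeStream, error) {
        csOpts := options.ChangeStream().
            SetComment("cs-example").             // sent with the aggregate and later getMores
            SetFullDocument(options.UpdateLookup) // request post-image lookup on updates
        return coll.Watch(ctx, mongo.Pipeline{}, csOpts)
    }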
+ ctx = newCtx + // Cancel the timeout-derived context at the end of executeOperation to avoid a context leak. + defer cancelFunc() + } if original := cs.aggregate.Execute(ctx); original != nil { retryableRead := cs.client.retryReads && cs.wireVersion != nil && cs.wireVersion.Max >= 6 if !retryableRead { @@ -395,7 +415,16 @@ func (cs *ChangeStream) createPipelineOptionsDoc() bsoncore.Document { } if cs.options.FullDocument != nil { - plDoc = bsoncore.AppendStringElement(plDoc, "fullDocument", string(*cs.options.FullDocument)) + // Only append a default "fullDocument" field if wire version is less than 6 (3.6). Otherwise, + // the server will assume users want the default behavior, and "fullDocument" does not need to be + // specified. + if *cs.options.FullDocument != options.Default || (cs.wireVersion != nil && cs.wireVersion.Max < 6) { + plDoc = bsoncore.AppendStringElement(plDoc, "fullDocument", string(*cs.options.FullDocument)) + } + } + + if cs.options.FullDocumentBeforeChange != nil { + plDoc = bsoncore.AppendStringElement(plDoc, "fullDocumentBeforeChange", string(*cs.options.FullDocumentBeforeChange)) } if cs.options.ResumeAfter != nil { @@ -408,6 +437,10 @@ func (cs *ChangeStream) createPipelineOptionsDoc() bsoncore.Document { plDoc = bsoncore.AppendDocumentElement(plDoc, "resumeAfter", raDoc) } + if cs.options.ShowExpandedEvents != nil { + plDoc = bsoncore.AppendBooleanElement(plDoc, "showExpandedEvents", *cs.options.ShowExpandedEvents) + } + if cs.options.StartAfter != nil { var saDoc bsoncore.Document saDoc, cs.err = transformBsoncoreDocument(cs.registry, cs.options.StartAfter, true, "startAfter") diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/change_stream_deployment.go b/vendor/go.mongodb.org/mongo-driver/mongo/change_stream_deployment.go index 36c6e254..9c61123c 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/change_stream_deployment.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/change_stream_deployment.go @@ -40,6 +40,10 @@ func (c *changeStreamDeployment) MinRTT() time.Duration { return c.server.MinRTT() } +func (c *changeStreamDeployment) RTT90() time.Duration { + return c.server.RTT90() +} + func (c *changeStreamDeployment) ProcessError(err error, conn driver.Connection) driver.ProcessErrorResult { ep, ok := c.server.(driver.ErrorProcessor) if !ok { diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/client.go b/vendor/go.mongodb.org/mongo-driver/mongo/client.go index ddc08bd5..d409135a 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/client.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/client.go @@ -17,6 +17,7 @@ import ( "go.mongodb.org/mongo-driver/bson" "go.mongodb.org/mongo-driver/bson/bsoncodec" "go.mongodb.org/mongo-driver/event" + "go.mongodb.org/mongo-driver/internal/uuid" "go.mongodb.org/mongo-driver/mongo/description" "go.mongodb.org/mongo-driver/mongo/options" "go.mongodb.org/mongo-driver/mongo/readconcern" @@ -25,14 +26,18 @@ import ( "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" "go.mongodb.org/mongo-driver/x/mongo/driver" "go.mongodb.org/mongo-driver/x/mongo/driver/auth" + "go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt" + mcopts "go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/options" "go.mongodb.org/mongo-driver/x/mongo/driver/ocsp" "go.mongodb.org/mongo-driver/x/mongo/driver/operation" "go.mongodb.org/mongo-driver/x/mongo/driver/session" "go.mongodb.org/mongo-driver/x/mongo/driver/topology" - "go.mongodb.org/mongo-driver/x/mongo/driver/uuid" ) -const defaultLocalThreshold = 15 * time.Millisecond +const ( + 
defaultLocalThreshold = 15 * time.Millisecond + defaultMaxPoolSize uint64 = 100 +) var ( // keyVaultCollOpts specifies options used to communicate with the key vault collection @@ -63,14 +68,16 @@ type Client struct { serverAPI *driver.ServerAPIOptions serverMonitor *event.ServerMonitor sessionPool *session.Pool + timeout *time.Duration // client-side encryption fields - keyVaultClientFLE *Client - keyVaultCollFLE *Collection - mongocryptdFLE *mcryptClient - cryptFLE driver.Crypt - metadataClientFLE *Client - internalClientFLE *Client + keyVaultClientFLE *Client + keyVaultCollFLE *Collection + mongocryptdFLE *mongocryptdClient + cryptFLE driver.Crypt + metadataClientFLE *Client + internalClientFLE *Client + encryptedFieldsMap map[string]interface{} } // Connect creates a new Client and then initializes it using the Connect method. This is equivalent to calling @@ -271,6 +278,9 @@ func (c *Client) Ping(ctx context.Context, rp *readpref.ReadPref) error { // StartSession does not actually communicate with the server and will not error if the client is // disconnected. // +// StartSession is safe to call from multiple goroutines concurrently. However, Sessions returned by StartSession are +// not safe for concurrent use by multiple goroutines. +// // If the DefaultReadConcern, DefaultWriteConcern, or DefaultReadPreference options are not set, the client's read // concern, write concern, or read preference will be used, respectively. func (c *Client) StartSession(opts ...*options.SessionOptions) (Session, error) { @@ -348,6 +358,12 @@ func (c *Client) endSessions(ctx context.Context) { } func (c *Client) configure(opts *options.ClientOptions) error { + var defaultOptions int + // Set default options + if opts.MaxPoolSize == nil { + defaultOptions++ + opts.SetMaxPoolSize(defaultMaxPoolSize) + } if err := opts.Validate(); err != nil { return err } @@ -624,6 +640,8 @@ func (c *Client) configure(opts *options.ClientOptions) error { topology.WithWriteTimeout(func(time.Duration) time.Duration { return *opts.SocketTimeout }), ) } + // Timeout + c.timeout = opts.Timeout // TLSConfig if opts.TLSConfig != nil { connOpts = append(connOpts, topology.WithTLSConfig( @@ -681,15 +699,16 @@ func (c *Client) configure(opts *options.ClientOptions) error { topology.WithClock(func(*session.ClusterClock) *session.ClusterClock { return c.clock }), topology.WithConnectionOptions(func(...topology.ConnectionOption) []topology.ConnectionOption { return connOpts }), ) - c.topologyOptions = append(topologyOpts, topology.WithServerOptions( + topologyOpts = append(topologyOpts, topology.WithServerOptions( func(...topology.ServerOption) []topology.ServerOption { return serverOpts }, )) + c.topologyOptions = topologyOpts // Deployment if opts.Deployment != nil { - // topology options: WithSeedlist, WithURI, WithSRVServiceName and WithSRVMaxHosts - // server options: WithClock and WithConnectionOptions - if len(serverOpts) > 2 || len(topologyOpts) > 4 { + // topology options: WithSeedlist, WithURI, WithSRVServiceName, WithSRVMaxHosts, and WithServerOptions + // server options: WithClock and WithConnectionOptions + default maxPoolSize + if len(serverOpts) > 2+defaultOptions || len(topologyOpts) > 5 { return errors.New("cannot specify topology or server options with a deployment") } c.deployment = opts.Deployment @@ -699,16 +718,30 @@ func (c *Client) configure(opts *options.ClientOptions) error { } func (c *Client) configureAutoEncryption(clientOpts *options.ClientOptions) error { + c.encryptedFieldsMap = 
clientOpts.AutoEncryptionOptions.EncryptedFieldsMap if err := c.configureKeyVaultClientFLE(clientOpts); err != nil { return err } if err := c.configureMetadataClientFLE(clientOpts); err != nil { return err } - if err := c.configureMongocryptdClientFLE(clientOpts.AutoEncryptionOptions); err != nil { + + mc, err := c.newMongoCrypt(clientOpts.AutoEncryptionOptions) + if err != nil { return err } - return c.configureCryptFLE(clientOpts.AutoEncryptionOptions) + + // If the crypt_shared library was loaded successfully, signal to the mongocryptd client creator + // that it can bypass spawning mongocryptd. + cryptSharedLibAvailable := mc.CryptSharedLibVersionString() != "" + mongocryptdFLE, err := newMongocryptdClient(cryptSharedLibAvailable, clientOpts.AutoEncryptionOptions) + if err != nil { + return err + } + c.mongocryptdFLE = mongocryptdFLE + + c.configureCryptFLE(mc, clientOpts.AutoEncryptionOptions) + return nil } func (c *Client) getOrCreateInternalClient(clientOpts *options.ClientOptions) (*Client, error) { @@ -763,32 +796,90 @@ func (c *Client) configureMetadataClientFLE(clientOpts *options.ClientOptions) e return err } -func (c *Client) configureMongocryptdClientFLE(opts *options.AutoEncryptionOptions) error { - var err error - c.mongocryptdFLE, err = newMcryptClient(opts) - return err -} - -func (c *Client) configureCryptFLE(opts *options.AutoEncryptionOptions) error { +func (c *Client) newMongoCrypt(opts *options.AutoEncryptionOptions) (*mongocrypt.MongoCrypt, error) { // convert schemas in SchemaMap to bsoncore documents cryptSchemaMap := make(map[string]bsoncore.Document) for k, v := range opts.SchemaMap { schema, err := transformBsoncoreDocument(c.registry, v, true, "schemaMap") if err != nil { - return err + return nil, err } cryptSchemaMap[k] = schema } + + // convert schemas in EncryptedFieldsMap to bsoncore documents + cryptEncryptedFieldsMap := make(map[string]bsoncore.Document) + for k, v := range opts.EncryptedFieldsMap { + encryptedFields, err := transformBsoncoreDocument(c.registry, v, true, "encryptedFieldsMap") + if err != nil { + return nil, err + } + cryptEncryptedFieldsMap[k] = encryptedFields + } + kmsProviders, err := transformBsoncoreDocument(c.registry, opts.KmsProviders, true, "kmsProviders") if err != nil { - return fmt.Errorf("error creating KMS providers document: %v", err) + return nil, fmt.Errorf("error creating KMS providers document: %v", err) + } + + // Set the crypt_shared library override path from the "cryptSharedLibPath" extra option if one + // was set. + cryptSharedLibPath := "" + if val, ok := opts.ExtraOptions["cryptSharedLibPath"]; ok { + str, ok := val.(string) + if !ok { + return nil, fmt.Errorf( + `expected AutoEncryption extra option "cryptSharedLibPath" to be a string, but is a %T`, val) + } + cryptSharedLibPath = str + } + + // Explicitly disable loading the crypt_shared library if requested. Note that this is ONLY + // intended for use from tests; there is no supported public API for explicitly disabling + // loading the crypt_shared library. + cryptSharedLibDisabled := false + if v, ok := opts.ExtraOptions["__cryptSharedLibDisabledForTestOnly"]; ok { + cryptSharedLibDisabled = v.(bool) + } + + bypassAutoEncryption := opts.BypassAutoEncryption != nil && *opts.BypassAutoEncryption + bypassQueryAnalysis := opts.BypassQueryAnalysis != nil && *opts.BypassQueryAnalysis + + mc, err := mongocrypt.NewMongoCrypt(mcopts.MongoCrypt(). + SetKmsProviders(kmsProviders). + SetLocalSchemaMap(cryptSchemaMap). + SetBypassQueryAnalysis(bypassQueryAnalysis). 
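(The option chain of the patch continues below.) The ExtraOptions keys consumed above, "cryptSharedLibPath" and "cryptSharedLibRequired", are supplied from application code; a hedged sketch with placeholder values:

    aeOpts := options.AutoEncryption().
        SetKeyVaultNamespace("encryption.__keyVault").
        SetKmsProviders(kmsProviders). // placeholder KMS credentials
        SetExtraOptions(map[string]interface{}{
            "cryptSharedLibPath":     "/opt/mongodb/lib/mongo_crypt_v1.so", // illustrative path
            "cryptSharedLibRequired": true, // fail fast instead of falling back to mongocryptd
        })
    clientOpts := options.Client().ApplyURI(uri).SetAutoEncryptionOptions(aeOpts)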
+ SetEncryptedFieldsMap(cryptEncryptedFieldsMap). + SetCryptSharedLibDisabled(cryptSharedLibDisabled || bypassAutoEncryption). + SetCryptSharedLibOverridePath(cryptSharedLibPath)) + if err != nil { + return nil, err + } + + var cryptSharedLibRequired bool + if val, ok := opts.ExtraOptions["cryptSharedLibRequired"]; ok { + b, ok := val.(bool) + if !ok { + return nil, fmt.Errorf( + `expected AutoEncryption extra option "cryptSharedLibRequired" to be a bool, but is a %T`, val) + } + cryptSharedLibRequired = b } - // configure options - var bypass bool - if opts.BypassAutoEncryption != nil { - bypass = *opts.BypassAutoEncryption + // If the "cryptSharedLibRequired" extra option is set to true, check the MongoCrypt version + // string to confirm that the library was successfully loaded. If the version string is empty, + // return an error indicating that we couldn't load the crypt_shared library. + if cryptSharedLibRequired && mc.CryptSharedLibVersionString() == "" { + return nil, errors.New( + `AutoEncryption extra option "cryptSharedLibRequired" is true, but we failed to load the crypt_shared library`) } + + return mc, nil +} + +//nolint:unused // the unused linter thinks that this function is unreachable because "c.newMongoCrypt" always panics without the "cse" build tag set. +func (c *Client) configureCryptFLE(mc *mongocrypt.MongoCrypt, opts *options.AutoEncryptionOptions) { + bypass := opts.BypassAutoEncryption != nil && *opts.BypassAutoEncryption kr := keyRetriever{coll: c.keyVaultCollFLE} var cir collInfoRetriever // If bypass is true, c.metadataClientFLE is nil and the collInfoRetriever @@ -798,23 +889,19 @@ func (c *Client) configureCryptFLE(opts *options.AutoEncryptionOptions) error { cir = collInfoRetriever{client: c.metadataClientFLE} } - cryptOpts := &driver.CryptOptions{ + c.cryptFLE = driver.NewCrypt(&driver.CryptOptions{ + MongoCrypt: mc, CollInfoFn: cir.cryptCollInfo, KeyFn: kr.cryptKeys, MarkFn: c.mongocryptdFLE.markCommand, - KmsProviders: kmsProviders, TLSConfig: opts.TLSConfig, BypassAutoEncryption: bypass, - SchemaMap: cryptSchemaMap, - } - - c.cryptFLE, err = driver.NewCrypt(cryptOpts) - return err + }) } // validSession returns an error if the session doesn't belong to the client func (c *Client) validSession(sess *session.Client) error { - if sess != nil && !uuid.Equal(sess.ClientID, c.id) { + if sess != nil && sess.ClientID != c.id { return ErrWrongClient } return nil @@ -845,7 +932,7 @@ func (c *Client) Database(name string, opts ...*options.DatabaseOptions) *Databa // // The opts parameter can be used to specify options for this operation (see the options.ListDatabasesOptions documentation). // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/listDatabases/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/listDatabases/. func (c *Client) ListDatabases(ctx context.Context, filter interface{}, opts ...*options.ListDatabasesOptions) (ListDatabasesResult, error) { if ctx == nil { ctx = context.Background() @@ -885,7 +972,7 @@ func (c *Client) ListDatabases(ctx context.Context, filter interface{}, opts ... op := operation.NewListDatabases(filterDoc). Session(sess).ReadPreference(c.readPreference).CommandMonitor(c.monitor). ServerSelector(selector).ClusterClock(c.clock).Database("admin").Deployment(c.deployment).Crypt(c.cryptFLE). 
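// Illustrative sketch (not from the upstream driver sources): how an application might opt in
// to the crypt_shared library through the "cryptSharedLibPath" and "cryptSharedLibRequired"
// extra options handled above. The URI, library path, and key material are hypothetical.
func exampleConnectWithCryptShared(ctx context.Context, localMasterKey []byte) (*Client, error) {
	aeOpts := options.AutoEncryption().
		SetKeyVaultNamespace("encryption.__keyVault").
		SetKmsProviders(map[string]map[string]interface{}{"local": {"key": localMasterKey}}).
		SetExtraOptions(map[string]interface{}{
			"cryptSharedLibPath":     "/usr/local/lib/mongo_crypt_v1.so", // hypothetical install path
			"cryptSharedLibRequired": true,                               // error out if the library cannot be loaded
		})
	return Connect(ctx, options.Client().
		ApplyURI("mongodb://localhost:27017").
		SetAutoEncryptionOptions(aeOpts))
}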
- ServerAPI(c.serverAPI) + ServerAPI(c.serverAPI).Timeout(c.timeout) if ldo.NameOnly != nil { op = op.NameOnly(*ldo.NameOnly) @@ -918,7 +1005,7 @@ func (c *Client) ListDatabases(ctx context.Context, filter interface{}, opts ... // The opts parameter can be used to specify options for this operation (see the options.ListDatabasesOptions // documentation.) // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/listDatabases/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/listDatabases/. func (c *Client) ListDatabaseNames(ctx context.Context, filter interface{}, opts ...*options.ListDatabasesOptions) ([]string, error) { opts = append(opts, options.ListDatabases().SetNameOnly(true)) @@ -939,6 +1026,9 @@ func (c *Client) ListDatabaseNames(ctx context.Context, filter interface{}, opts // SessionContext must be used as the Context parameter for any operations in the fn callback that should be executed // under the session. // +// WithSession is safe to call from multiple goroutines concurrently. However, the SessionContext passed to the +// WithSession callback function is not safe for concurrent use by multiple goroutines. +// // If the ctx parameter already contains a Session, that Session will be replaced with the one provided. // // Any error returned by the fn callback will be returned without any modifications. @@ -951,6 +1041,9 @@ func WithSession(ctx context.Context, sess Session, fn func(SessionContext) erro // be executed under a session. After the callback returns, the created Session is ended, meaning that any in-progress // transactions started by fn will be aborted even if fn returns an error. // +// UseSession is safe to call from multiple goroutines concurrently. However, the SessionContext passed to the +// UseSession callback function is not safe for concurrent use by multiple goroutines. +// // If the ctx parameter already contains a Session, that Session will be replaced with the newly created one. // // Any error returned by the fn callback will be returned without any modifications. @@ -959,6 +1052,9 @@ func (c *Client) UseSession(ctx context.Context, fn func(SessionContext) error) } // UseSessionWithOptions operates like UseSession but uses the given SessionOptions to create the Session. +// +// UseSessionWithOptions is safe to call from multiple goroutines concurrently. However, the SessionContext passed to +// the UseSessionWithOptions callback function is not safe for concurrent use by multiple goroutines. func (c *Client) UseSessionWithOptions(ctx context.Context, opts *options.SessionOptions, fn func(SessionContext) error) error { defaultSess, err := c.StartSession(opts) if err != nil { @@ -970,13 +1066,13 @@ func (c *Client) UseSessionWithOptions(ctx context.Context, opts *options.Sessio } // Watch returns a change stream for all changes on the deployment. See -// https://docs.mongodb.com/manual/changeStreams/ for more information about change streams. +// https://www.mongodb.com/docs/manual/changeStreams/ for more information about change streams. // // The client must be configured with read concern majority or no read concern for a change stream to be created // successfully. // // The pipeline parameter must be an array of documents, each representing a pipeline stage. The pipeline cannot be -// nil or empty. The stage documents must all be non-nil. See https://docs.mongodb.com/manual/changeStreams/ for a list +// nil or empty. 
The stage documents must all be non-nil. See https://www.mongodb.com/docs/manual/changeStreams/ for a list // of pipeline stages that can be used with change streams. For a pipeline of bson.D documents, the mongo.Pipeline{} // type can be used. // diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/client_encryption.go b/vendor/go.mongodb.org/mongo-driver/mongo/client_encryption.go index fe4646b6..f88b7bed 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/client_encryption.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/client_encryption.go @@ -17,7 +17,8 @@ import ( "go.mongodb.org/mongo-driver/mongo/options" "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" "go.mongodb.org/mongo-driver/x/mongo/driver" - cryptOpts "go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/options" + "go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt" + mcopts "go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/options" ) // ClientEncryption is used to create data keys and explicitly encrypt and decrypt BSON values. @@ -47,36 +48,56 @@ func NewClientEncryption(keyVaultClient *Client, opts ...*options.ClientEncrypti return nil, fmt.Errorf("error creating KMS providers map: %v", err) } + mc, err := mongocrypt.NewMongoCrypt(mcopts.MongoCrypt(). + SetKmsProviders(kmsProviders). + // Explicitly disable loading the crypt_shared library for the Crypt used for + // ClientEncryption because it's only needed for AutoEncryption and we don't expect users to + // have the crypt_shared library installed if they're using ClientEncryption. + SetCryptSharedLibDisabled(true)) + if err != nil { + return nil, err + } + // create Crypt kr := keyRetriever{coll: ce.keyVaultColl} cir := collInfoRetriever{client: ce.keyVaultClient} - ce.crypt, err = driver.NewCrypt(&driver.CryptOptions{ - KeyFn: kr.cryptKeys, - CollInfoFn: cir.cryptCollInfo, - KmsProviders: kmsProviders, - TLSConfig: ceo.TLSConfig, + ce.crypt = driver.NewCrypt(&driver.CryptOptions{ + MongoCrypt: mc, + KeyFn: kr.cryptKeys, + CollInfoFn: cir.cryptCollInfo, + TLSConfig: ceo.TLSConfig, }) - if err != nil { - return nil, err - } return ce, nil } -// CreateDataKey creates a new key document and inserts it into the key vault collection. Returns the _id of the -// created document. -func (ce *ClientEncryption) CreateDataKey(ctx context.Context, kmsProvider string, opts ...*options.DataKeyOptions) (primitive.Binary, error) { - // translate opts to cryptOpts.DataKeyOptions +// AddKeyAltName adds a keyAltName to the keyAltNames array of the key document in the key vault collection with the +// given UUID (BSON binary subtype 0x04). Returns the previous version of the key document. +func (ce *ClientEncryption) AddKeyAltName(ctx context.Context, id primitive.Binary, keyAltName string) *SingleResult { + filter := bsoncore.NewDocumentBuilder().AppendBinary("_id", id.Subtype, id.Data).Build() + keyAltNameDoc := bsoncore.NewDocumentBuilder().AppendString("keyAltNames", keyAltName).Build() + update := bsoncore.NewDocumentBuilder().AppendDocument("$addToSet", keyAltNameDoc).Build() + return ce.keyVaultColl.FindOneAndUpdate(ctx, filter, update) +} + +// CreateDataKey creates a new key document and inserts it into the key vault collection. Returns the _id of the created +// document as a UUID (BSON binary subtype 0x04). +func (ce *ClientEncryption) CreateDataKey(ctx context.Context, kmsProvider string, + opts ...*options.DataKeyOptions) (primitive.Binary, error) { + + // translate opts to mcopts.DataKeyOptions dko := options.MergeDataKeyOptions(opts...)
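// Illustrative sketch (not from the upstream driver sources): creating a data key with key alt
// names and caller-supplied key material via the options merged above. The provider name "local"
// and the 96-byte rawKeyMaterial input are assumptions.
func exampleCreateDataKey(ctx context.Context, ce *ClientEncryption, rawKeyMaterial []byte) (primitive.Binary, error) {
	return ce.CreateDataKey(ctx, "local", options.DataKey().
		SetKeyAltNames([]string{"example"}).
		SetKeyMaterial(rawKeyMaterial))
}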
- co := cryptOpts.DataKey().SetKeyAltNames(dko.KeyAltNames) + co := mcopts.DataKey().SetKeyAltNames(dko.KeyAltNames) if dko.MasterKey != nil { keyDoc, err := transformBsoncoreDocument(ce.keyVaultClient.registry, dko.MasterKey, true, "masterKey") if err != nil { return primitive.Binary{}, err } - co.SetMasterKey(keyDoc) } + if dko.KeyMaterial != nil { + co.SetKeyMaterial(dko.KeyMaterial) + } // create data key document dataKeyDoc, err := ce.crypt.CreateDataKey(ctx, kmsProvider, co) @@ -95,9 +116,11 @@ func (ce *ClientEncryption) CreateDataKey(ctx context.Context, kmsProvider strin } // Encrypt encrypts a BSON value with the given key and algorithm. Returns an encrypted value (BSON binary of subtype 6). -func (ce *ClientEncryption) Encrypt(ctx context.Context, val bson.RawValue, opts ...*options.EncryptOptions) (primitive.Binary, error) { +func (ce *ClientEncryption) Encrypt(ctx context.Context, val bson.RawValue, + opts ...*options.EncryptOptions) (primitive.Binary, error) { + eo := options.MergeEncryptOptions(opts...) - transformed := cryptOpts.ExplicitEncryption() + transformed := mcopts.ExplicitEncryption() if eo.KeyID != nil { transformed.SetKeyID(*eo.KeyID) } @@ -105,6 +128,11 @@ func (ce *ClientEncryption) Encrypt(ctx context.Context, val bson.RawValue, opts transformed.SetKeyAltName(*eo.KeyAltName) } transformed.SetAlgorithm(eo.Algorithm) + transformed.SetQueryType(eo.QueryType) + + if eo.ContentionFactor != nil { + transformed.SetContentionFactor(*eo.ContentionFactor) + } subtype, data, err := ce.crypt.EncryptExplicit(ctx, bsoncore.Value{Type: val.Type, Data: val.Value}, transformed) if err != nil { @@ -130,6 +158,143 @@ func (ce *ClientEncryption) Close(ctx context.Context) error { return ce.keyVaultClient.Disconnect(ctx) } +// DeleteKey removes the key document with the given UUID (BSON binary subtype 0x04) from the key vault collection. +// Returns the result of the internal deleteOne() operation on the key vault collection. +func (ce *ClientEncryption) DeleteKey(ctx context.Context, id primitive.Binary) (*DeleteResult, error) { + filter := bsoncore.NewDocumentBuilder().AppendBinary("_id", id.Subtype, id.Data).Build() + return ce.keyVaultColl.DeleteOne(ctx, filter) +} + +// GetKeyByAltName returns a key document in the key vault collection with the given keyAltName. +func (ce *ClientEncryption) GetKeyByAltName(ctx context.Context, keyAltName string) *SingleResult { + filter := bsoncore.NewDocumentBuilder().AppendString("keyAltNames", keyAltName).Build() + return ce.keyVaultColl.FindOne(ctx, filter) +} + +// GetKey finds a single key document with the given UUID (BSON binary subtype 0x04). Returns the result of the +// internal find() operation on the key vault collection. +func (ce *ClientEncryption) GetKey(ctx context.Context, id primitive.Binary) *SingleResult { + filter := bsoncore.NewDocumentBuilder().AppendBinary("_id", id.Subtype, id.Data).Build() + return ce.keyVaultColl.FindOne(ctx, filter) +} + +// GetKeys finds all documents in the key vault collection. Returns the result of the internal find() operation on the +// key vault collection. +func (ce *ClientEncryption) GetKeys(ctx context.Context) (*Cursor, error) { + return ce.keyVaultColl.Find(ctx, bson.D{}) +} + +// RemoveKeyAltName removes a keyAltName from the keyAltNames array of the key document in the key vault collection with +// the given UUID (BSON binary subtype 0x04). Returns the previous version of the key document. 
+func (ce *ClientEncryption) RemoveKeyAltName(ctx context.Context, id primitive.Binary, keyAltName string) *SingleResult { + filter := bsoncore.NewDocumentBuilder().AppendBinary("_id", id.Subtype, id.Data).Build() + update := bson.A{bson.D{{"$set", bson.D{{"keyAltNames", bson.D{{"$cond", bson.A{bson.D{{"$eq", + bson.A{"$keyAltNames", bson.A{keyAltName}}}}, "$$REMOVE", bson.D{{"$filter", + bson.D{{"input", "$keyAltNames"}, {"cond", bson.D{{"$ne", bson.A{"$$this", keyAltName}}}}}}}}}}}}}}} + return ce.keyVaultColl.FindOneAndUpdate(ctx, filter, update) +} + +// setRewrapManyDataKeyWriteModels will prepare the WriteModel slice for bulk updating the rewrapped documents. +func setRewrapManyDataKeyWriteModels(rewrappedDocuments []bsoncore.Document, writeModels *[]WriteModel) error { + const idKey = "_id" + const keyMaterial = "keyMaterial" + const masterKey = "masterKey" + + if writeModels == nil { + return fmt.Errorf("writeModels pointer not set for location referenced") + } + + // Append a WriteModel with the update document for each rewrapped document's _id filter. + for _, rewrappedDocument := range rewrappedDocuments { + // Prepare the new master key for update. + masterKeyValue, err := rewrappedDocument.LookupErr(masterKey) + if err != nil { + return err + } + masterKeyDoc := masterKeyValue.Document() + + // Prepare the new key material for update. + keyMaterialValue, err := rewrappedDocument.LookupErr(keyMaterial) + if err != nil { + return err + } + keyMaterialSubtype, keyMaterialData := keyMaterialValue.Binary() + keyMaterialBinary := primitive.Binary{Subtype: keyMaterialSubtype, Data: keyMaterialData} + + // Prepare the _id filter for documents to update. + id, err := rewrappedDocument.LookupErr(idKey) + if err != nil { + return err + } + + idSubtype, idData, ok := id.BinaryOK() + if !ok { + return fmt.Errorf("expected to assert %q as binary, got type %T", idKey, id) + } + binaryID := primitive.Binary{Subtype: idSubtype, Data: idData} + + // Append the mutable document to the slice for bulk update. + *writeModels = append(*writeModels, NewUpdateOneModel(). + SetFilter(bson.D{{idKey, binaryID}}). + SetUpdate( + bson.D{ + {"$set", bson.D{{keyMaterial, keyMaterialBinary}, {masterKey, masterKeyDoc}}}, + {"$currentDate", bson.D{{"updateDate", true}}}, + }, + )) + } + return nil +} + +// RewrapManyDataKey decrypts and encrypts all matching data keys with a possibly new masterKey value. For all +// matching documents, this method will overwrite the "masterKey", "updateDate", and "keyMaterial" fields. On error, some +// matching data keys may have been rewrapped. +func (ce *ClientEncryption) RewrapManyDataKey(ctx context.Context, filter interface{}, + opts ...*options.RewrapManyDataKeyOptions) (*RewrapManyDataKeyResult, error) { + + rmdko := options.MergeRewrapManyDataKeyOptions(opts...) + if ctx == nil { + ctx = context.Background() + } + + // Transfer rmdko options to /x/ package options to publish the mongocrypt feed. + co := mcopts.RewrapManyDataKey() + if rmdko.MasterKey != nil { + keyDoc, err := transformBsoncoreDocument(ce.keyVaultClient.registry, rmdko.MasterKey, true, "masterKey") + if err != nil { + return nil, err + } + co.SetMasterKey(keyDoc) + } + if rmdko.Provider != nil { + co.SetProvider(*rmdko.Provider) + } + + // Prepare the filters and rewrap the data key using mongocrypt.
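// Illustrative sketch (not from the upstream driver sources): rewrapping every data key in the
// key vault with a different KMS provider, using the RewrapManyDataKey flow implemented here.
// The provider name is an assumption; an empty bson.D filter matches all key documents.
func exampleRewrapAllDataKeys(ctx context.Context, ce *ClientEncryption) (int64, error) {
	res, err := ce.RewrapManyDataKey(ctx, bson.D{}, options.RewrapManyDataKey().SetProvider("local"))
	if err != nil || res.BulkWriteResult == nil {
		return 0, err
	}
	return res.BulkWriteResult.ModifiedCount, nil
}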
+ filterdoc, err := transformBsoncoreDocument(ce.keyVaultClient.registry, filter, true, "filter") + if err != nil { + return nil, err + } + + rewrappedDocuments, err := ce.crypt.RewrapDataKey(ctx, filterdoc, co) + if err != nil { + return nil, err + } + if len(rewrappedDocuments) == 0 { + // If there are no documents to rewrap, then do nothing. + return new(RewrapManyDataKeyResult), nil + } + + // Prepare the WriteModel slice for bulk updating the rewrapped data keys. + models := []WriteModel{} + if err := setRewrapManyDataKeyWriteModels(rewrappedDocuments, &models); err != nil { + return nil, err + } + + bulkWriteResults, err := ce.keyVaultColl.BulkWrite(ctx, models) + return &RewrapManyDataKeyResult{BulkWriteResult: bulkWriteResults}, err +} + // splitNamespace takes a namespace in the form "database.collection" and returns (database name, collection name) func splitNamespace(ns string) (string, string) { firstDot := strings.Index(ns, ".") diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/collection.go b/vendor/go.mongodb.org/mongo-driver/mongo/collection.go index 590d9280..aa3ffbe9 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/collection.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/collection.go @@ -16,6 +16,7 @@ import ( "go.mongodb.org/mongo-driver/bson" "go.mongodb.org/mongo-driver/bson/bsoncodec" "go.mongodb.org/mongo-driver/bson/bsontype" + "go.mongodb.org/mongo-driver/internal" "go.mongodb.org/mongo-driver/mongo/description" "go.mongodb.org/mongo-driver/mongo/options" "go.mongodb.org/mongo-driver/mongo/readconcern" @@ -166,7 +167,7 @@ func (coll *Collection) Database() *Database { return coll.db } -// BulkWrite performs a bulk write operation (https://docs.mongodb.com/manual/core/bulk-write-operations/). +// BulkWrite performs a bulk write operation (https://www.mongodb.com/docs/manual/core/bulk-write-operations/). // // The models parameter must be a slice of operations to be executed in this bulk write. It cannot be nil or empty. // All of the models must be non-nil. See the mongo.WriteModel documentation for a list of valid model types and @@ -218,6 +219,7 @@ func (coll *Collection) BulkWrite(ctx context.Context, models []WriteModel, bwo := options.MergeBulkWriteOptions(opts...) op := bulkWrite{ + comment: bwo.Comment, ordered: bwo.Ordered, bypassDocumentValidation: bwo.BypassDocumentValidation, models: models, @@ -281,11 +283,18 @@ func (coll *Collection) insert(ctx context.Context, documents []interface{}, ServerSelector(selector).ClusterClock(coll.client.clock). Database(coll.db.name).Collection(coll.name). Deployment(coll.client.deployment).Crypt(coll.client.cryptFLE).Ordered(true). - ServerAPI(coll.client.serverAPI) + ServerAPI(coll.client.serverAPI).Timeout(coll.client.timeout) imo := options.MergeInsertManyOptions(opts...) if imo.BypassDocumentValidation != nil && *imo.BypassDocumentValidation { op = op.BypassDocumentValidation(*imo.BypassDocumentValidation) } + if imo.Comment != nil { + comment, err := transformValue(coll.registry, imo.Comment, true, "comment") + if err != nil { + return nil, err + } + op = op.Comment(comment) + } if imo.Ordered != nil { op = op.Ordered(*imo.Ordered) } @@ -324,7 +333,7 @@ func (coll *Collection) insert(ctx context.Context, documents []interface{}, // // The opts parameter can be used to specify options for the operation (see the options.InsertOneOptions documentation.) // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/insert/. 
+// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/insert/. func (coll *Collection) InsertOne(ctx context.Context, document interface{}, opts ...*options.InsertOneOptions) (*InsertOneResult, error) { @@ -334,6 +343,9 @@ func (coll *Collection) InsertOne(ctx context.Context, document interface{}, if ioOpts.BypassDocumentValidation != nil && *ioOpts.BypassDocumentValidation { imOpts.SetBypassDocumentValidation(*ioOpts.BypassDocumentValidation) } + if ioOpts.Comment != nil { + imOpts.SetComment(ioOpts.Comment) + } res, err := coll.insert(ctx, []interface{}{document}, imOpts) rr, err := processWriteError(err) @@ -353,7 +365,7 @@ func (coll *Collection) InsertOne(ctx context.Context, document interface{}, // // The opts parameter can be used to specify options for the operation (see the options.InsertManyOptions documentation.) // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/insert/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/insert/. func (coll *Collection) InsertMany(ctx context.Context, documents []interface{}, opts ...*options.InsertManyOptions) (*InsertManyResult, error) { @@ -451,7 +463,14 @@ func (coll *Collection) delete(ctx context.Context, filter interface{}, deleteOn ServerSelector(selector).ClusterClock(coll.client.clock). Database(coll.db.name).Collection(coll.name). Deployment(coll.client.deployment).Crypt(coll.client.cryptFLE).Ordered(true). - ServerAPI(coll.client.serverAPI) + ServerAPI(coll.client.serverAPI).Timeout(coll.client.timeout) + if do.Comment != nil { + comment, err := transformValue(coll.registry, do.Comment, true, "comment") + if err != nil { + return nil, err + } + op = op.Comment(comment) + } if do.Hint != nil { op = op.Hint(true) } @@ -485,7 +504,7 @@ func (coll *Collection) delete(ctx context.Context, filter interface{}, deleteOn // // The opts parameter can be used to specify options for the operation (see the options.DeleteOptions documentation). // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/delete/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/delete/. func (coll *Collection) DeleteOne(ctx context.Context, filter interface{}, opts ...*options.DeleteOptions) (*DeleteResult, error) { @@ -501,7 +520,7 @@ func (coll *Collection) DeleteOne(ctx context.Context, filter interface{}, // // The opts parameter can be used to specify options for the operation (see the options.DeleteOptions documentation). // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/delete/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/delete/. func (coll *Collection) DeleteMany(ctx context.Context, filter interface{}, opts ...*options.DeleteOptions) (*DeleteResult, error) { @@ -555,7 +574,8 @@ func (coll *Collection) updateOrReplace(ctx context.Context, filter bsoncore.Doc ServerSelector(selector).ClusterClock(coll.client.clock). Database(coll.db.name).Collection(coll.name). Deployment(coll.client.deployment).Crypt(coll.client.cryptFLE).Hint(uo.Hint != nil). - ArrayFilters(uo.ArrayFilters != nil).Ordered(true).ServerAPI(coll.client.serverAPI) + ArrayFilters(uo.ArrayFilters != nil).Ordered(true).ServerAPI(coll.client.serverAPI). 
+ Timeout(coll.client.timeout) if uo.Let != nil { let, err := transformBsoncoreDocument(coll.registry, uo.Let, true, "let") if err != nil { @@ -567,6 +587,13 @@ func (coll *Collection) updateOrReplace(ctx context.Context, filter bsoncore.Doc if uo.BypassDocumentValidation != nil && *uo.BypassDocumentValidation { op = op.BypassDocumentValidation(*uo.BypassDocumentValidation) } + if uo.Comment != nil { + comment, err := transformValue(coll.registry, uo.Comment, true, "comment") + if err != nil { + return nil, err + } + op = op.Comment(comment) + } retry := driver.RetryNone // retryable writes are only enabled for updateOne/replaceOne operations if !multi && coll.client.retryWrites { @@ -601,12 +628,12 @@ func (coll *Collection) updateOrReplace(ctx context.Context, filter bsoncore.Doc // the operation will succeed and an UpdateResult with a MatchedCount of 0 will be returned. // // The update parameter must be a document containing update operators -// (https://docs.mongodb.com/manual/reference/operator/update/) and can be used to specify the modifications to be +// (https://www.mongodb.com/docs/manual/reference/operator/update/) and can be used to specify the modifications to be // made to the selected document. It cannot be nil or empty. // // The opts parameter can be used to specify options for the operation (see the options.UpdateOptions documentation). // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/update/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/update/. func (coll *Collection) UpdateByID(ctx context.Context, id interface{}, update interface{}, opts ...*options.UpdateOptions) (*UpdateResult, error) { if id == nil { @@ -623,12 +650,12 @@ func (coll *Collection) UpdateByID(ctx context.Context, id interface{}, update i // matched set and MatchedCount will equal 1. // // The update parameter must be a document containing update operators -// (https://docs.mongodb.com/manual/reference/operator/update/) and can be used to specify the modifications to be +// (https://www.mongodb.com/docs/manual/reference/operator/update/) and can be used to specify the modifications to be // made to the selected document. It cannot be nil or empty. // // The opts parameter can be used to specify options for the operation (see the options.UpdateOptions documentation). // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/update/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/update/. func (coll *Collection) UpdateOne(ctx context.Context, filter interface{}, update interface{}, opts ...*options.UpdateOptions) (*UpdateResult, error) { @@ -651,12 +678,12 @@ func (coll *Collection) UpdateOne(ctx context.Context, filter interface{}, updat // with a MatchedCount of 0 will be returned. // // The update parameter must be a document containing update operators -// (https://docs.mongodb.com/manual/reference/operator/update/) and can be used to specify the modifications to be made +// (https://www.mongodb.com/docs/manual/reference/operator/update/) and can be used to specify the modifications to be made // to the selected documents. It cannot be nil or empty. // // The opts parameter can be used to specify options for the operation (see the options.UpdateOptions documentation). // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/update/.
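// Illustrative sketch (not from the upstream driver sources): passing a comment through the new
// UpdateOptions.Comment path wired up above so the operation can be correlated in database logs.
// Collection, filter, and comment values are hypothetical; the comment may be any
// BSON-marshalable value.
func exampleCommentedUpdate(ctx context.Context, coll *Collection) (*UpdateResult, error) {
	return coll.UpdateOne(ctx,
		bson.D{{"status", "new"}},
		bson.D{{"$set", bson.D{{"status", "seen"}}}},
		options.Update().SetComment("example-trace-id"))
}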
+// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/update/. func (coll *Collection) UpdateMany(ctx context.Context, filter interface{}, update interface{}, opts ...*options.UpdateOptions) (*UpdateResult, error) { @@ -680,11 +707,11 @@ func (coll *Collection) UpdateMany(ctx context.Context, filter interface{}, upda // selected from the matched set and MatchedCount will equal 1. // // The replacement parameter must be a document that will be used to replace the selected document. It cannot be nil -// and cannot contain any update operators (https://docs.mongodb.com/manual/reference/operator/update/). +// and cannot contain any update operators (https://www.mongodb.com/docs/manual/reference/operator/update/). // // The opts parameter can be used to specify options for the operation (see the options.ReplaceOptions documentation). // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/update/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/update/. func (coll *Collection) ReplaceOne(ctx context.Context, filter interface{}, replacement interface{}, opts ...*options.ReplaceOptions) (*UpdateResult, error) { @@ -717,6 +744,7 @@ func (coll *Collection) ReplaceOne(ctx context.Context, filter interface{}, uOpts.Upsert = opt.Upsert uOpts.Hint = opt.Hint uOpts.Let = opt.Let + uOpts.Comment = opt.Comment updateOptions = append(updateOptions, uOpts) } @@ -728,12 +756,12 @@ func (coll *Collection) ReplaceOne(ctx context.Context, filter interface{}, // The pipeline parameter must be an array of documents, each representing an aggregation stage. The pipeline cannot // be nil but can be empty. The stage documents must all be non-nil. For a pipeline of bson.D documents, the // mongo.Pipeline type can be used. See -// https://docs.mongodb.com/manual/reference/operator/aggregation-pipeline/#db-collection-aggregate-stages for a list of +// https://www.mongodb.com/docs/manual/reference/operator/aggregation-pipeline/#db-collection-aggregate-stages for a list of // valid stages in aggregations. // // The opts parameter can be used to specify options for the operation (see the options.AggregateOptions documentation.) // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/aggregate/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/aggregate/. func (coll *Collection) Aggregate(ctx context.Context, pipeline interface{}, opts ...*options.AggregateOptions) (*Cursor, error) { a := aggregateParams{ @@ -754,7 +782,7 @@ func (coll *Collection) Aggregate(ctx context.Context, pipeline interface{}, return aggregate(a) } -// aggreate is the helper method for Aggregate +// aggregate is the helper method for Aggregate func aggregate(a aggregateParams) (cur *Cursor, err error) { if a.ctx == nil { a.ctx = context.Background() @@ -817,7 +845,8 @@ func aggregate(a aggregateParams) (cur *Cursor, err error) { Deployment(a.client.deployment). Crypt(a.client.cryptFLE). ServerAPI(a.client.serverAPI). - HasOutputStage(hasOutputStage) + HasOutputStage(hasOutputStage). 
+ Timeout(a.client.timeout) if ao.AllowDiskUse != nil { op.AllowDiskUse(*ao.AllowDiskUse) @@ -841,6 +870,12 @@ func aggregate(a aggregateParams) (cur *Cursor, err error) { } if ao.Comment != nil { op.Comment(*ao.Comment) + + commentVal, err := transformValue(a.registry, ao.Comment, true, "comment") + if err != nil { + return nil, err + } + cursorOpts.Comment = commentVal } if ao.Hint != nil { hintVal, err := transformValue(a.registry, ao.Hint, false, "hint") @@ -935,10 +970,14 @@ func (coll *Collection) CountDocuments(ctx context.Context, filter interface{}, selector := makeReadPrefSelector(sess, coll.readSelector, coll.client.localThreshold) op := operation.NewAggregate(pipelineArr).Session(sess).ReadConcern(rc).ReadPreference(coll.readPreference). CommandMonitor(coll.client.monitor).ServerSelector(selector).ClusterClock(coll.client.clock).Database(coll.db.name). - Collection(coll.name).Deployment(coll.client.deployment).Crypt(coll.client.cryptFLE).ServerAPI(coll.client.serverAPI) + Collection(coll.name).Deployment(coll.client.deployment).Crypt(coll.client.cryptFLE).ServerAPI(coll.client.serverAPI). + Timeout(coll.client.timeout) if countOpts.Collation != nil { op.Collation(bsoncore.Document(countOpts.Collation.ToDocument())) } + if countOpts.Comment != nil { + op.Comment(*countOpts.Comment) + } if countOpts.MaxTime != nil { op.MaxTimeMS(int64(*countOpts.MaxTime / time.Millisecond)) } @@ -984,7 +1023,7 @@ func (coll *Collection) CountDocuments(ctx context.Context, filter interface{}, // The opts parameter can be used to specify options for the operation (see the options.EstimatedDocumentCountOptions // documentation). // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/count/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/count/. func (coll *Collection) EstimatedDocumentCount(ctx context.Context, opts ...*options.EstimatedDocumentCountOptions) (int64, error) { @@ -1017,9 +1056,17 @@ func (coll *Collection) EstimatedDocumentCount(ctx context.Context, op := operation.NewCount().Session(sess).ClusterClock(coll.client.clock). Database(coll.db.name).Collection(coll.name).CommandMonitor(coll.client.monitor). Deployment(coll.client.deployment).ReadConcern(rc).ReadPreference(coll.readPreference). - ServerSelector(selector).Crypt(coll.client.cryptFLE).ServerAPI(coll.client.serverAPI) + ServerSelector(selector).Crypt(coll.client.cryptFLE).ServerAPI(coll.client.serverAPI). + Timeout(coll.client.timeout) co := options.MergeEstimatedDocumentCountOptions(opts...) + if co.Comment != nil { + comment, err := transformValue(coll.registry, co.Comment, false, "comment") + if err != nil { + return 0, err + } + op = op.Comment(comment) + } if co.MaxTime != nil { op = op.MaxTimeMS(int64(*co.MaxTime / time.Millisecond)) } @@ -1043,7 +1090,7 @@ func (coll *Collection) EstimatedDocumentCount(ctx context.Context, // // The opts parameter can be used to specify options for the operation (see the options.DistinctOptions documentation). // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/distinct/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/distinct/. 
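// Illustrative sketch (not from the upstream driver sources): supplying a comment to an
// aggregation; per the code above, the same comment is also copied onto the cursor options so it
// accompanies subsequent getMore commands. The pipeline contents are hypothetical.
func exampleCommentedAggregate(ctx context.Context, coll *Collection) (*Cursor, error) {
	pipeline := Pipeline{{{"$match", bson.D{{"status", "new"}}}}}
	return coll.Aggregate(ctx, pipeline, options.Aggregate().SetComment("example-trace-id"))
}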
func (coll *Collection) Distinct(ctx context.Context, fieldName string, filter interface{}, opts ...*options.DistinctOptions) ([]interface{}, error) { @@ -1083,11 +1130,19 @@ func (coll *Collection) Distinct(ctx context.Context, fieldName string, filter i Session(sess).ClusterClock(coll.client.clock). Database(coll.db.name).Collection(coll.name).CommandMonitor(coll.client.monitor). Deployment(coll.client.deployment).ReadConcern(rc).ReadPreference(coll.readPreference). - ServerSelector(selector).Crypt(coll.client.cryptFLE).ServerAPI(coll.client.serverAPI) + ServerSelector(selector).Crypt(coll.client.cryptFLE).ServerAPI(coll.client.serverAPI). + Timeout(coll.client.timeout) if option.Collation != nil { op.Collation(bsoncore.Document(option.Collation.ToDocument())) } + if option.Comment != nil { + comment, err := transformValue(coll.registry, option.Comment, true, "comment") + if err != nil { + return nil, err + } + op.Comment(comment) + } if option.MaxTime != nil { op.MaxTimeMS(int64(*option.MaxTime / time.Millisecond)) } @@ -1132,7 +1187,7 @@ func (coll *Collection) Distinct(ctx context.Context, fieldName string, filter i // // The opts parameter can be used to specify options for the operation (see the options.FindOptions documentation). // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/find/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/find/. func (coll *Collection) Find(ctx context.Context, filter interface{}, opts ...*options.FindOptions) (cur *Cursor, err error) { @@ -1175,7 +1230,8 @@ func (coll *Collection) Find(ctx context.Context, filter interface{}, Session(sess).ReadConcern(rc).ReadPreference(coll.readPreference). CommandMonitor(coll.client.monitor).ServerSelector(selector). ClusterClock(coll.client.clock).Database(coll.db.name).Collection(coll.name). - Deployment(coll.client.deployment).Crypt(coll.client.cryptFLE).ServerAPI(coll.client.serverAPI) + Deployment(coll.client.deployment).Crypt(coll.client.cryptFLE).ServerAPI(coll.client.serverAPI). + Timeout(coll.client.timeout) fo := options.MergeFindOptions(opts...) cursorOpts := coll.client.createBaseCursorOptions() @@ -1195,6 +1251,12 @@ func (coll *Collection) Find(ctx context.Context, filter interface{}, } if fo.Comment != nil { op.Comment(*fo.Comment) + + commentVal, err := transformValue(coll.registry, fo.Comment, true, "comment") + if err != nil { + return nil, err + } + cursorOpts.Comment = commentVal } if fo.CursorType != nil { switch *fo.CursorType { @@ -1305,7 +1367,7 @@ func (coll *Collection) Find(ctx context.Context, filter interface{}, // // The opts parameter can be used to specify options for this operation (see the options.FindOneOptions documentation). // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/find/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/find/. func (coll *Collection) FindOne(ctx context.Context, filter interface{}, opts ...*options.FindOneOptions) *SingleResult { @@ -1411,7 +1473,7 @@ func (coll *Collection) findAndModify(ctx context.Context, op *operation.FindAnd // The opts parameter can be used to specify options for the operation (see the options.FindOneAndDeleteOptions // documentation). // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/findAndModify/. 
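// Illustrative sketch (not from the upstream driver sources): a commented Find; as shown above,
// the comment is propagated to the cursor options so getMore commands carry it too. Filter and
// comment values are hypothetical.
func exampleCommentedFind(ctx context.Context, coll *Collection) (*Cursor, error) {
	return coll.Find(ctx, bson.D{{"status", "new"}}, options.Find().SetComment("example-trace-id"))
}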
+// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/findAndModify/. func (coll *Collection) FindOneAndDelete(ctx context.Context, filter interface{}, opts ...*options.FindOneAndDeleteOptions) *SingleResult { @@ -1420,10 +1482,17 @@ func (coll *Collection) FindOneAndDelete(ctx context.Context, filter interface{} return &SingleResult{err: err} } fod := options.MergeFindOneAndDeleteOptions(opts...) - op := operation.NewFindAndModify(f).Remove(true).ServerAPI(coll.client.serverAPI) + op := operation.NewFindAndModify(f).Remove(true).ServerAPI(coll.client.serverAPI).Timeout(coll.client.timeout) if fod.Collation != nil { op = op.Collation(bsoncore.Document(fod.Collation.ToDocument())) } + if fod.Comment != nil { + comment, err := transformValue(coll.registry, fod.Comment, true, "comment") + if err != nil { + return &SingleResult{err: err} + } + op = op.Comment(comment) + } if fod.MaxTime != nil { op = op.MaxTimeMS(int64(*fod.MaxTime / time.Millisecond)) } @@ -1467,12 +1536,12 @@ func (coll *Collection) FindOneAndDelete(ctx context.Context, filter interface{} // ErrNoDocuments will be returned. If the filter matches multiple documents, one will be selected from the matched set. // // The replacement parameter must be a document that will be used to replace the selected document. It cannot be nil -// and cannot contain any update operators (https://docs.mongodb.com/manual/reference/operator/update/). +// and cannot contain any update operators (https://www.mongodb.com/docs/manual/reference/operator/update/). // // The opts parameter can be used to specify options for the operation (see the options.FindOneAndReplaceOptions // documentation). // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/findAndModify/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/findAndModify/. func (coll *Collection) FindOneAndReplace(ctx context.Context, filter interface{}, replacement interface{}, opts ...*options.FindOneAndReplaceOptions) *SingleResult { @@ -1490,13 +1559,20 @@ func (coll *Collection) FindOneAndReplace(ctx context.Context, filter interface{ fo := options.MergeFindOneAndReplaceOptions(opts...) op := operation.NewFindAndModify(f).Update(bsoncore.Value{Type: bsontype.EmbeddedDocument, Data: r}). - ServerAPI(coll.client.serverAPI) + ServerAPI(coll.client.serverAPI).Timeout(coll.client.timeout) if fo.BypassDocumentValidation != nil && *fo.BypassDocumentValidation { op = op.BypassDocumentValidation(*fo.BypassDocumentValidation) } if fo.Collation != nil { op = op.Collation(bsoncore.Document(fo.Collation.ToDocument())) } + if fo.Comment != nil { + comment, err := transformValue(coll.registry, fo.Comment, true, "comment") + if err != nil { + return &SingleResult{err: err} + } + op = op.Comment(comment) + } if fo.MaxTime != nil { op = op.MaxTimeMS(int64(*fo.MaxTime / time.Millisecond)) } @@ -1546,13 +1622,13 @@ func (coll *Collection) FindOneAndReplace(ctx context.Context, filter interface{ // ErrNoDocuments will be returned. If the filter matches multiple documents, one will be selected from the matched set. // // The update parameter must be a document containing update operators -// (https://docs.mongodb.com/manual/reference/operator/update/) and can be used to specify the modifications to be made +// (https://www.mongodb.com/docs/manual/reference/operator/update/) and can be used to specify the modifications to be made // to the selected document.
It cannot be nil or empty. // // The opts parameter can be used to specify options for the operation (see the options.FindOneAndUpdateOptions // documentation). // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/findAndModify/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/findAndModify/. func (coll *Collection) FindOneAndUpdate(ctx context.Context, filter interface{}, update interface{}, opts ...*options.FindOneAndUpdateOptions) *SingleResult { @@ -1566,7 +1642,7 @@ func (coll *Collection) FindOneAndUpdate(ctx context.Context, filter interface{} } fo := options.MergeFindOneAndUpdateOptions(opts...) - op := operation.NewFindAndModify(f).ServerAPI(coll.client.serverAPI) + op := operation.NewFindAndModify(f).ServerAPI(coll.client.serverAPI).Timeout(coll.client.timeout) u, err := transformUpdateValue(coll.registry, update, true) if err != nil { @@ -1587,6 +1663,13 @@ func (coll *Collection) FindOneAndUpdate(ctx context.Context, filter interface{} if fo.Collation != nil { op = op.Collation(bsoncore.Document(fo.Collation.ToDocument())) } + if fo.Comment != nil { + comment, err := transformValue(coll.registry, fo.Comment, true, "comment") + if err != nil { + return &SingleResult{err: err} + } + op = op.Comment(comment) + } if fo.MaxTime != nil { op = op.MaxTimeMS(int64(*fo.MaxTime / time.Millisecond)) } @@ -1629,13 +1712,13 @@ func (coll *Collection) FindOneAndUpdate(ctx context.Context, filter interface{} } // Watch returns a change stream for all changes on the corresponding collection. See -// https://docs.mongodb.com/manual/changeStreams/ for more information about change streams. +// https://www.mongodb.com/docs/manual/changeStreams/ for more information about change streams. // // The Collection must be configured with read concern majority or no read concern for a change stream to be created // successfully. // // The pipeline parameter must be an array of documents, each representing a pipeline stage. The pipeline cannot be -// nil but can be empty. The stage documents must all be non-nil. See https://docs.mongodb.com/manual/changeStreams/ for +// nil but can be empty. The stage documents must all be non-nil. See https://www.mongodb.com/docs/manual/changeStreams/ for // a list of pipeline stages that can be used with change streams. For a pipeline of bson.D documents, the // mongo.Pipeline{} type can be used. // @@ -1665,6 +1748,69 @@ func (coll *Collection) Indexes() IndexView { // Drop drops the collection on the server. This method ignores "namespace not found" errors so it is safe to drop // a collection that does not exist on the server. func (coll *Collection) Drop(ctx context.Context) error { + // Follow Client-Side Encryption specification to check for encryptedFields. + // Drop does not have an encryptedFields option. See: GODRIVER-2413. + // Check for encryptedFields from the client EncryptedFieldsMap. + // Check for encryptedFields from the server if EncryptedFieldsMap is set. + ef := coll.db.getEncryptedFieldsFromMap(coll.name) + if ef == nil && coll.db.client.encryptedFieldsMap != nil { + var err error + if ef, err = coll.db.getEncryptedFieldsFromServer(ctx, coll.name); err != nil { + return err + } + } + + if ef != nil { + return coll.dropEncryptedCollection(ctx, ef) + } + + return coll.drop(ctx) +} + +// dropEncryptedCollection drops a collection with EncryptedFields. 
+func (coll *Collection) dropEncryptedCollection(ctx context.Context, ef interface{}) error { + efBSON, err := transformBsoncoreDocument(coll.registry, ef, true /* mapAllowed */, "encryptedFields") + if err != nil { + return fmt.Errorf("error transforming document: %v", err) + } + + // Drop the three encryption-related, associated collections: `escCollection`, `eccCollection` and `ecocCollection`. + // Drop ESCCollection. + escCollection, err := internal.GetEncryptedStateCollectionName(efBSON, coll.name, internal.EncryptedStateCollection) + if err != nil { + return err + } + if err := coll.db.Collection(escCollection).drop(ctx); err != nil { + return err + } + + // Drop ECCCollection. + eccCollection, err := internal.GetEncryptedStateCollectionName(efBSON, coll.name, internal.EncryptedCacheCollection) + if err != nil { + return err + } + if err := coll.db.Collection(eccCollection).drop(ctx); err != nil { + return err + } + + // Drop ECOCCollection. + ecocCollection, err := internal.GetEncryptedStateCollectionName(efBSON, coll.name, internal.EncryptedCompactionCollection) + if err != nil { + return err + } + if err := coll.db.Collection(ecocCollection).drop(ctx); err != nil { + return err + } + + // Drop the data collection. + if err := coll.drop(ctx); err != nil { + return err + } + return nil +} + +// drop drops a collection without EncryptedFields. +func (coll *Collection) drop(ctx context.Context) error { if ctx == nil { ctx = context.Background() } diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/cursor.go b/vendor/go.mongodb.org/mongo-driver/mongo/cursor.go index 533cfce0..d21005fe 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/cursor.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/cursor.go @@ -125,7 +125,7 @@ func (c *Cursor) Next(ctx context.Context) bool { // TryNext attempts to get the next document for this cursor. It returns true if there were no errors and the next // document is available. This is only recommended for use with tailable cursors as a non-blocking alternative to -// Next. See https://docs.mongodb.com/manual/core/tailable-cursors/ for more information about tailable cursors. +// Next. See https://www.mongodb.com/docs/manual/core/tailable-cursors/ for more information about tailable cursors. // // TryNext returns false if the cursor is exhausted, an error occurs when getting results from the server, the next // document is not yet available, or ctx expires. If ctx expires, the error will be set to ctx.Err(). diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/database.go b/vendor/go.mongodb.org/mongo-driver/mongo/database.go index b0066f04..57b5417f 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/database.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/database.go @@ -13,12 +13,12 @@ import ( "go.mongodb.org/mongo-driver/bson" "go.mongodb.org/mongo-driver/bson/bsoncodec" + "go.mongodb.org/mongo-driver/internal" "go.mongodb.org/mongo-driver/mongo/description" "go.mongodb.org/mongo-driver/mongo/options" "go.mongodb.org/mongo-driver/mongo/readconcern" "go.mongodb.org/mongo-driver/mongo/readpref" "go.mongodb.org/mongo-driver/mongo/writeconcern" - "go.mongodb.org/mongo-driver/x/bsonx" "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" "go.mongodb.org/mongo-driver/x/mongo/driver" "go.mongodb.org/mongo-driver/x/mongo/driver/operation" @@ -107,12 +107,12 @@ func (db *Database) Collection(name string, opts ...*options.CollectionOptions) // The pipeline parameter must be a slice of documents, each representing an aggregation stage. 
The pipeline // cannot be nil but can be empty. The stage documents must all be non-nil. For a pipeline of bson.D documents, the // mongo.Pipeline type can be used. See -// https://docs.mongodb.com/manual/reference/operator/aggregation-pipeline/#db-aggregate-stages for a list of valid +// https://www.mongodb.com/docs/manual/reference/operator/aggregation-pipeline/#db-aggregate-stages for a list of valid // stages in database-level aggregations. // // The opts parameter can be used to specify options for this operation (see the options.AggregateOptions documentation). // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/aggregate/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/aggregate/. func (db *Database) Aggregate(ctx context.Context, pipeline interface{}, opts ...*options.AggregateOptions) (*Cursor, error) { a := aggregateParams{ @@ -176,7 +176,8 @@ func (db *Database) processRunCommand(ctx context.Context, cmd interface{}, return op.Session(sess).CommandMonitor(db.client.monitor). ServerSelector(readSelect).ClusterClock(db.client.clock). Database(db.name).Deployment(db.client.deployment).ReadConcern(db.readConcern). - Crypt(db.client.cryptFLE).ReadPreference(ro.ReadPreference).ServerAPI(db.client.serverAPI), sess, nil + Crypt(db.client.cryptFLE).ReadPreference(ro.ReadPreference).ServerAPI(db.client.serverAPI). + Timeout(db.client.timeout), sess, nil } // RunCommand executes the given command against the database. This function does not obey the Database's read @@ -184,11 +185,13 @@ func (db *Database) processRunCommand(ctx context.Context, cmd interface{}, // // The runCommand parameter must be a document for the command to be executed. It cannot be nil. // This must be an order-preserving type such as bson.D. Map types such as bson.M are not valid. -// If the command document contains a session ID or any transaction-specific fields, the behavior is undefined. -// Specifying API versioning options in the command document and declaring an API version on the client is not supported. -// The behavior of RunCommand is undefined in this case. // // The opts parameter can be used to specify options for this operation (see the options.RunCmdOptions documentation). +// +// The behavior of RunCommand is undefined if the command document contains any of the following: +// - A session ID or any transaction-specific fields +// - API versioning options when an API version is already declared on the Client +// - maxTimeMS when Timeout is set on the Client func (db *Database) RunCommand(ctx context.Context, runCommand interface{}, opts ...*options.RunCmdOptions) *SingleResult { if ctx == nil { ctx = context.Background() @@ -217,9 +220,13 @@ func (db *Database) RunCommand(ctx context.Context, runCommand interface{}, opts // // The runCommand parameter must be a document for the command to be executed. It cannot be nil. // This must be an order-preserving type such as bson.D. Map types such as bson.M are not valid. -// If the command document contains a session ID or any transaction-specific fields, the behavior is undefined. // // The opts parameter can be used to specify options for this operation (see the options.RunCmdOptions documentation). 
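// Illustrative sketch (not from the upstream driver sources): a RunCommand call honoring the
// caveats documented above; the command document uses an order-preserving bson.D, carries no
// session or API-versioning fields, and omits maxTimeMS so it stays compatible with a
// client-level Timeout.
func examplePing(ctx context.Context, db *Database) error {
	return db.RunCommand(ctx, bson.D{{"ping", 1}}).Err()
}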
+// +// The behavior of RunCommandCursor is undefined if the command document contains any of the following: +// - A session ID or any transaction-specific fields +// - API versioning options when an API version is already declared on the Client +// - maxTimeMS when Timeout is set on the Client func (db *Database) RunCommandCursor(ctx context.Context, runCommand interface{}, opts ...*options.RunCmdOptions) (*Cursor, error) { if ctx == nil { ctx = context.Background() @@ -302,7 +309,7 @@ func (db *Database) Drop(ctx context.Context) error { // The opts parameter can be used to specify options for the operation (see the options.ListCollectionsOptions // documentation). // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/listCollections/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/listCollections/. // // BUG(benjirewis): ListCollectionSpecifications prevents listing more than 100 collections per database when running // against MongoDB version 2.6. @@ -339,7 +346,7 @@ func (db *Database) ListCollectionSpecifications(ctx context.Context, filter int // The opts parameter can be used to specify options for the operation (see the options.ListCollectionsOptions // documentation). // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/listCollections/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/listCollections/. // // BUG(benjirewis): ListCollections prevents listing more than 100 collections per database when running against // MongoDB version 2.6. @@ -378,7 +385,7 @@ func (db *Database) ListCollections(ctx context.Context, filter interface{}, opt Session(sess).ReadPreference(db.readPreference).CommandMonitor(db.client.monitor). ServerSelector(selector).ClusterClock(db.client.clock). Database(db.name).Deployment(db.client.deployment).Crypt(db.client.cryptFLE). - ServerAPI(db.client.serverAPI) + ServerAPI(db.client.serverAPI).Timeout(db.client.timeout) cursorOpts := db.client.createBaseCursorOptions() if lco.NameOnly != nil { @@ -423,7 +430,7 @@ func (db *Database) ListCollections(ctx context.Context, filter interface{}, opt // The opts parameter can be used to specify options for the operation (see the options.ListCollectionsOptions // documentation). // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/listCollections/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/listCollections/. // // BUG(benjirewis): ListCollectionNames prevents listing more than 100 collections per database when running against // MongoDB version 2.6. @@ -439,19 +446,13 @@ func (db *Database) ListCollectionNames(ctx context.Context, filter interface{}, names := make([]string, 0) for res.Next(ctx) { - next := &bsonx.Doc{} - err = res.Decode(next) - if err != nil { - return nil, err - } - - elem, err := next.LookupErr("name") + elem, err := res.Current.LookupErr("name") if err != nil { return nil, err } - if elem.Type() != bson.TypeString { - return nil, fmt.Errorf("incorrect type for 'name'. got %v. want %v", elem.Type(), bson.TypeString) + if elem.Type != bson.TypeString { + return nil, fmt.Errorf("incorrect type for 'name'. got %v. 
want %v", elem.Type, bson.TypeString) } elemName := elem.StringValue() @@ -478,13 +479,13 @@ func (db *Database) WriteConcern() *writeconcern.WriteConcern { } // Watch returns a change stream for all changes to the corresponding database. See -// https://docs.mongodb.com/manual/changeStreams/ for more information about change streams. +// https://www.mongodb.com/docs/manual/changeStreams/ for more information about change streams. // // The Database must be configured with read concern majority or no read concern for a change stream to be created // successfully. // // The pipeline parameter must be a slice of documents, each representing a pipeline stage. The pipeline cannot be -// nil but can be empty. The stage documents must all be non-nil. See https://docs.mongodb.com/manual/changeStreams/ for +// nil but can be empty. The stage documents must all be non-nil. See https://www.mongodb.com/docs/manual/changeStreams/ for // a list of pipeline stages that can be used with change streams. For a pipeline of bson.D documents, the // mongo.Pipeline{} type can be used. // @@ -512,8 +513,141 @@ func (db *Database) Watch(ctx context.Context, pipeline interface{}, // The opts parameter can be used to specify options for the operation (see the options.CreateCollectionOptions // documentation). // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/create/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/create/. func (db *Database) CreateCollection(ctx context.Context, name string, opts ...*options.CreateCollectionOptions) error { + cco := options.MergeCreateCollectionOptions(opts...) + // Follow Client-Side Encryption specification to check for encryptedFields. + // Check for encryptedFields from create options. + ef := cco.EncryptedFields + // Check for encryptedFields from the client EncryptedFieldsMap. + if ef == nil { + ef = db.getEncryptedFieldsFromMap(name) + } + if ef != nil { + return db.createCollectionWithEncryptedFields(ctx, name, ef, opts...) + } + + return db.createCollection(ctx, name, opts...) +} + +// getEncryptedFieldsFromServer tries to get an "encryptedFields" document associated with collectionName by running the "listCollections" command. +// Returns nil and no error if the listCollections command succeeds, but "encryptedFields" is not present. +func (db *Database) getEncryptedFieldsFromServer(ctx context.Context, collectionName string) (interface{}, error) { + // Check if collection has an EncryptedFields configured server-side. + collSpecs, err := db.ListCollectionSpecifications(ctx, bson.D{{"name", collectionName}}) + if err != nil { + return nil, err + } + if len(collSpecs) == 0 { + return nil, nil + } + if len(collSpecs) > 1 { + return nil, fmt.Errorf("expected 1 or 0 results from listCollections, got %v", len(collSpecs)) + } + collSpec := collSpecs[0] + rawValue, err := collSpec.Options.LookupErr("encryptedFields") + if err == bsoncore.ErrElementNotFound { + return nil, nil + } else if err != nil { + return nil, err + } + + encryptedFields, ok := rawValue.DocumentOK() + if !ok { + return nil, fmt.Errorf("expected encryptedFields of %v to be document, got %v", collectionName, rawValue.Type) + } + + return encryptedFields, nil +} + +// getEncryptedFieldsFromServer tries to get an "encryptedFields" document associated with collectionName by checking the client EncryptedFieldsMap. 
+// Returns nil and no error if an EncryptedFieldsMap is not configured, or does not contain an entry for collectionName. +func (db *Database) getEncryptedFieldsFromMap(collectionName string) interface{} { + // Check the EncryptedFieldsMap + efMap := db.client.encryptedFieldsMap + if efMap == nil { + return nil + } + + namespace := db.name + "." + collectionName + + ef, ok := efMap[namespace] + if ok { + return ef + } + return nil +} + +// createCollectionWithEncryptedFields creates a collection with an EncryptedFields. +func (db *Database) createCollectionWithEncryptedFields(ctx context.Context, name string, ef interface{}, opts ...*options.CreateCollectionOptions) error { + efBSON, err := transformBsoncoreDocument(db.registry, ef, true /* mapAllowed */, "encryptedFields") + if err != nil { + return fmt.Errorf("error transforming document: %v", err) + } + + // Create the three encryption-related, associated collections: `escCollection`, `eccCollection` and `ecocCollection`. + + stateCollectionOpts := options.CreateCollection(). + SetClusteredIndex(bson.D{{"key", bson.D{{"_id", 1}}}, {"unique", true}}) + // Create ESCCollection. + escCollection, err := internal.GetEncryptedStateCollectionName(efBSON, name, internal.EncryptedStateCollection) + if err != nil { + return err + } + + if err := db.createCollection(ctx, escCollection, stateCollectionOpts); err != nil { + return err + } + + // Create ECCCollection. + eccCollection, err := internal.GetEncryptedStateCollectionName(efBSON, name, internal.EncryptedCacheCollection) + if err != nil { + return err + } + + if err := db.createCollection(ctx, eccCollection, stateCollectionOpts); err != nil { + return err + } + + // Create ECOCCollection. + ecocCollection, err := internal.GetEncryptedStateCollectionName(efBSON, name, internal.EncryptedCompactionCollection) + if err != nil { + return err + } + + if err := db.createCollection(ctx, ecocCollection, stateCollectionOpts); err != nil { + return err + } + + // Create a data collection with the 'encryptedFields' option. + op, err := db.createCollectionOperation(name, opts...) + if err != nil { + return err + } + + op.EncryptedFields(efBSON) + if err := db.executeCreateOperation(ctx, op); err != nil { + return err + } + + // Create an index on the __safeContent__ field in the collection @collectionName. + if _, err := db.Collection(name).Indexes().CreateOne(ctx, IndexModel{Keys: bson.D{{"__safeContent__", 1}}}); err != nil { + return fmt.Errorf("error creating safeContent index: %v", err) + } + + return nil +} + +// createCollection creates a collection without EncryptedFields. +func (db *Database) createCollection(ctx context.Context, name string, opts ...*options.CreateCollectionOptions) error { + op, err := db.createCollectionOperation(name, opts...) + if err != nil { + return err + } + return db.executeCreateOperation(ctx, op) +} + +func (db *Database) createCollectionOperation(name string, opts ...*options.CreateCollectionOptions) (*operation.Create, error) { cco := options.MergeCreateCollectionOptions(opts...) 
op := operation.NewCreate(name).ServerAPI(db.client.serverAPI) @@ -523,19 +657,26 @@ func (db *Database) CreateCollection(ctx context.Context, name string, opts ...* if cco.Collation != nil { op.Collation(bsoncore.Document(cco.Collation.ToDocument())) } + if cco.ChangeStreamPreAndPostImages != nil { + csppi, err := transformBsoncoreDocument(db.registry, cco.ChangeStreamPreAndPostImages, true, "changeStreamPreAndPostImages") + if err != nil { + return nil, err + } + op.ChangeStreamPreAndPostImages(csppi) + } if cco.DefaultIndexOptions != nil { idx, doc := bsoncore.AppendDocumentStart(nil) if cco.DefaultIndexOptions.StorageEngine != nil { storageEngine, err := transformBsoncoreDocument(db.registry, cco.DefaultIndexOptions.StorageEngine, true, "storageEngine") if err != nil { - return err + return nil, err } doc = bsoncore.AppendDocumentElement(doc, "storageEngine", storageEngine) } doc, err := bsoncore.AppendDocumentEnd(doc, idx) if err != nil { - return err + return nil, err } op.IndexOptionDefaults(doc) @@ -549,7 +690,7 @@ func (db *Database) CreateCollection(ctx context.Context, name string, opts ...* if cco.StorageEngine != nil { storageEngine, err := transformBsoncoreDocument(db.registry, cco.StorageEngine, true, "storageEngine") if err != nil { - return err + return nil, err } op.StorageEngine(storageEngine) } @@ -562,7 +703,7 @@ func (db *Database) CreateCollection(ctx context.Context, name string, opts ...* if cco.Validator != nil { validator, err := transformBsoncoreDocument(db.registry, cco.Validator, true, "validator") if err != nil { - return err + return nil, err } op.Validator(validator) } @@ -582,17 +723,24 @@ func (db *Database) CreateCollection(ctx context.Context, name string, opts ...* doc, err := bsoncore.AppendDocumentEnd(doc, idx) if err != nil { - return err + return nil, err } op.TimeSeries(doc) } + if cco.ClusteredIndex != nil { + clusteredIndex, err := transformBsoncoreDocument(db.registry, cco.ClusteredIndex, true, "clusteredIndex") + if err != nil { + return nil, err + } + op.ClusteredIndex(clusteredIndex) + } - return db.executeCreateOperation(ctx, op) + return op, nil } // CreateView executes a create command to explicitly create a view on the server. See -// https://docs.mongodb.com/manual/core/views/ for more information about views. This method requires driver version >= +// https://www.mongodb.com/docs/manual/core/views/ for more information about views. This method requires driver version >= // 1.4.0 and MongoDB version >= 3.4. // // The viewName parameter specifies the name of the view to create. diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/doc.go b/vendor/go.mongodb.org/mongo-driver/mongo/doc.go index 669aa14c..76a063fa 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/doc.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/doc.go @@ -105,8 +105,14 @@ // // Note: Auto encryption is an enterprise-only feature. // -// The libmongocrypt C library is required when using client-side encryption. libmongocrypt version 1.3.0 or higher is -// required when using driver version 1.8.0 or higher. To install libmongocrypt, follow the instructions for your +// The libmongocrypt C library is required when using client-side encryption. 
Specific versions of libmongocrypt +// are required for different versions of the Go Driver: +// - Go Driver v1.2.0 requires libmongocrypt v1.0.0 or higher +// - Go Driver v1.5.0 requires libmongocrypt v1.1.0 or higher +// - Go Driver v1.8.0 requires libmongocrypt v1.3.0 or higher +// - Go Driver v1.10.0 requires libmongocrypt v1.5.0 or higher +// +// To install libmongocrypt, follow the instructions for your // operating system: // // 1. Linux: follow the instructions listed at @@ -117,6 +123,7 @@ // to install packages via brew and compile the libmongocrypt source code. // // 3. Windows: +// // mkdir -p c:/libmongocrypt/bin // mkdir -p c:/libmongocrypt/include // @@ -128,18 +135,8 @@ // cp ./include/mongocrypt/*.h c:/libmongocrypt/include // export PATH=$PATH:/cygdrive/c/libmongocrypt/bin // -// libmongocrypt communicates with the mongocryptd process for automatic encryption. This process can be started manually -// or auto-spawned by the driver itself. To enable auto-spawning, ensure the process binary is on the PATH. To start it -// manually, use AutoEncryptionOptions: -// -// aeo := options.AutoEncryption() -// mongocryptdOpts := map[string]interface{}{ -// "mongocryptdBypassSpawn": true, -// } -// aeo.SetExtraOptions(mongocryptdOpts) -// To specify a process URI for mongocryptd, the "mongocryptdURI" option can be passed in the ExtraOptions map as well. -// See the ClientSideEncryption and ClientSideEncryptionCreateKey examples below for code samples about using this -// feature. +// libmongocrypt communicates with the mongocryptd process or mongo_crypt shared library for automatic encryption. +// See AutoEncryptionOpts.SetExtraOptions for options to configure use of mongocryptd or mongo_crypt. // -// [1] See https://docs.mongodb.com/manual/reference/connection-string/#dns-seedlist-connection-format +// [1] See https://www.mongodb.com/docs/manual/reference/connection-string/#dns-seedlist-connection-format package mongo diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/errors.go b/vendor/go.mongodb.org/mongo-driver/mongo/errors.go index a16efab0..33e23573 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/errors.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/errors.go @@ -104,6 +104,9 @@ func IsTimeout(err error) bool { if err == context.DeadlineExceeded { return true } + if err == driver.ErrDeadlineWouldBeExceeded { + return true + } if ne, ok := err.(net.Error); ok { return ne.Timeout() } diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/index_view.go b/vendor/go.mongodb.org/mongo-driver/mongo/index_view.go index e8e260f1..a393c7e7 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/index_view.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/index_view.go @@ -45,7 +45,7 @@ type IndexView struct { // IndexModel represents a new index to be created. type IndexModel struct { // A document describing which keys should be used for the index. It cannot be nil. This must be an order-preserving - // type such as bson.D. Map types such as bson.M are not valid. See https://docs.mongodb.com/manual/indexes/#indexes + // type such as bson.D. Map types such as bson.M are not valid. See https://www.mongodb.com/docs/manual/indexes/#indexes // for examples of valid documents. Keys interface{} @@ -65,7 +65,7 @@ func isNamespaceNotFoundError(err error) bool { // The opts parameter can be used to specify options for this operation (see the options.ListIndexesOptions // documentation). 
// -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/listIndexes/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/listIndexes/. func (iv IndexView) List(ctx context.Context, opts ...*options.ListIndexesOptions) (*Cursor, error) { if ctx == nil { ctx = context.Background() @@ -95,7 +95,8 @@ func (iv IndexView) List(ctx context.Context, opts ...*options.ListIndexesOption Session(sess).CommandMonitor(iv.coll.client.monitor). ServerSelector(selector).ClusterClock(iv.coll.client.clock). Database(iv.coll.db.name).Collection(iv.coll.name). - Deployment(iv.coll.client.deployment).ServerAPI(iv.coll.client.serverAPI) + Deployment(iv.coll.client.deployment).ServerAPI(iv.coll.client.serverAPI). + Timeout(iv.coll.client.timeout) cursorOpts := iv.coll.client.createBaseCursorOptions() lio := options.MergeListIndexesOptions(opts...) @@ -175,7 +176,7 @@ func (iv IndexView) CreateOne(ctx context.Context, model IndexModel, opts ...*op // The opts parameter can be used to specify options for this operation (see the options.CreateIndexesOptions // documentation). // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/createIndexes/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/createIndexes/. func (iv IndexView) CreateMany(ctx context.Context, models []IndexModel, opts ...*options.CreateIndexesOptions) ([]string, error) { names := make([]string, 0, len(models)) @@ -256,7 +257,8 @@ func (iv IndexView) CreateMany(ctx context.Context, models []IndexModel, opts .. op := operation.NewCreateIndexes(indexes). Session(sess).WriteConcern(wc).ClusterClock(iv.coll.client.clock). Database(iv.coll.db.name).Collection(iv.coll.name).CommandMonitor(iv.coll.client.monitor). - Deployment(iv.coll.client.deployment).ServerSelector(selector).ServerAPI(iv.coll.client.serverAPI) + Deployment(iv.coll.client.deployment).ServerSelector(selector).ServerAPI(iv.coll.client.serverAPI). + Timeout(iv.coll.client.timeout) if option.MaxTime != nil { op.MaxTimeMS(int64(*option.MaxTime / time.Millisecond)) @@ -400,7 +402,8 @@ func (iv IndexView) drop(ctx context.Context, name string, opts ...*options.Drop Session(sess).WriteConcern(wc).CommandMonitor(iv.coll.client.monitor). ServerSelector(selector).ClusterClock(iv.coll.client.clock). Database(iv.coll.db.name).Collection(iv.coll.name). - Deployment(iv.coll.client.deployment).ServerAPI(iv.coll.client.serverAPI) + Deployment(iv.coll.client.deployment).ServerAPI(iv.coll.client.serverAPI). + Timeout(iv.coll.client.timeout) if dio.MaxTime != nil { op.MaxTimeMS(int64(*dio.MaxTime / time.Millisecond)) } @@ -427,7 +430,7 @@ func (iv IndexView) drop(ctx context.Context, name string, opts ...*options.Drop // The opts parameter can be used to specify options for this operation (see the options.DropIndexesOptions // documentation). // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/dropIndexes/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/dropIndexes/. 
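The Timeout(iv.coll.client.timeout) calls threaded through List, CreateMany, and drop above mean that index commands now inherit any client-level timeout. A minimal sketch combining this with the IsTimeout change from errors.go; the URI, database, collection, and key names are illustrative, and a reachable mongod is assumed:

    package main

    import (
        "context"
        "log"
        "time"

        "go.mongodb.org/mongo-driver/bson"
        "go.mongodb.org/mongo-driver/mongo"
        "go.mongodb.org/mongo-driver/mongo/options"
    )

    func main() {
        // SetTimeout is the client-wide operation bound introduced in this patch.
        client, err := mongo.Connect(context.Background(),
            options.Client().ApplyURI("mongodb://localhost:27017").SetTimeout(5*time.Second))
        if err != nil {
            log.Fatal(err)
        }
        defer client.Disconnect(context.Background())

        // Keys must be an order-preserving type such as bson.D (see IndexModel above).
        iv := client.Database("test").Collection("coll").Indexes()
        name, err := iv.CreateOne(context.Background(), mongo.IndexModel{
            Keys: bson.D{{Key: "email", Value: 1}},
        })
        if mongo.IsTimeout(err) {
            // With the errors.go change above, driver-internal deadline errors
            // (driver.ErrDeadlineWouldBeExceeded) are reported as timeouts too.
            log.Fatal("createIndexes exceeded the client Timeout")
        }
        if err != nil {
            log.Fatal(err)
        }
        log.Println("created index:", name)
    }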
func (iv IndexView) DropOne(ctx context.Context, name string, opts ...*options.DropIndexesOptions) (bson.Raw, error) { if name == "*" { return nil, ErrMultipleIndexDrop @@ -443,7 +446,7 @@ func (iv IndexView) DropOne(ctx context.Context, name string, opts ...*options.D // The opts parameter can be used to specify options for this operation (see the options.DropIndexesOptions // documentation). // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/dropIndexes/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/dropIndexes/. func (iv IndexView) DropAll(ctx context.Context, opts ...*options.DropIndexesOptions) (bson.Raw, error) { return iv.drop(ctx, "*", opts...) } diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/mongo.go b/vendor/go.mongodb.org/mongo-driver/mongo/mongo.go index da29175c..80282527 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/mongo.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/mongo.go @@ -123,17 +123,6 @@ func transformAndEnsureID(registry *bsoncodec.Registry, val interface{}) (bsonco return doc, id, nil } -func transformDocument(registry *bsoncodec.Registry, val interface{}) (bsonx.Doc, error) { - if doc, ok := val.(bsonx.Doc); ok { - return doc.Copy(), nil - } - b, err := transformBsoncoreDocument(registry, val, true, "document") - if err != nil { - return nil, err - } - return bsonx.ReadDoc(b) -} - func transformBsoncoreDocument(registry *bsoncodec.Registry, val interface{}, mapAllowed bool, paramName string) (bsoncore.Document, error) { if registry == nil { registry = bson.DefaultRegistry diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/mongocryptd.go b/vendor/go.mongodb.org/mongo-driver/mongo/mongocryptd.go index c36b1d31..016ccef6 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/mongocryptd.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/mongocryptd.go @@ -28,17 +28,18 @@ const ( var defaultTimeoutArgs = []string{"--idleShutdownTimeoutSecs=60"} var databaseOpts = options.Database().SetReadConcern(readconcern.New()).SetReadPreference(readpref.Primary()) -type mcryptClient struct { +type mongocryptdClient struct { bypassSpawn bool client *Client path string spawnArgs []string } -func newMcryptClient(opts *options.AutoEncryptionOptions) (*mcryptClient, error) { +func newMongocryptdClient(cryptSharedLibAvailable bool, opts *options.AutoEncryptionOptions) (*mongocryptdClient, error) { // create mcryptClient instance and spawn process if necessary var bypassSpawn bool var bypassAutoEncryption bool + if bypass, ok := opts.ExtraOptions["mongocryptdBypassSpawn"]; ok { bypassSpawn = bypass.(bool) } @@ -46,10 +47,15 @@ func newMcryptClient(opts *options.AutoEncryptionOptions) (*mcryptClient, error) bypassAutoEncryption = *opts.BypassAutoEncryption } - mc := &mcryptClient{ - // mongocryptd should not be spawned if mongocryptdBypassSpawn is passed or if bypassAutoEncryption is - // specified because it is not used during decryption - bypassSpawn: bypassSpawn || bypassAutoEncryption, + bypassQueryAnalysis := opts.BypassQueryAnalysis != nil && *opts.BypassQueryAnalysis + + mc := &mongocryptdClient{ + // mongocryptd should not be spawned if any of these conditions are true: + // - mongocryptdBypassSpawn is passed + // - bypassAutoEncryption is true because mongocryptd is not used during decryption + // - bypassQueryAnalysis is true because mongocryptd is not used during decryption + // - the crypt_shared library is available because it replaces all 
mongocryptd functionality. + bypassSpawn: bypassSpawn || bypassAutoEncryption || bypassQueryAnalysis || cryptSharedLibAvailable, } if !mc.bypassSpawn { @@ -76,7 +82,7 @@ func newMcryptClient(opts *options.AutoEncryptionOptions) (*mcryptClient, error) } // markCommand executes the given command on mongocryptd. -func (mc *mcryptClient) markCommand(ctx context.Context, dbName string, cmd bsoncore.Document) (bsoncore.Document, error) { +func (mc *mongocryptdClient) markCommand(ctx context.Context, dbName string, cmd bsoncore.Document) (bsoncore.Document, error) { // Remove the explicit session from the context if one is set. // The explicit session will be from a different client. // If an explicit session is set, it is applied after automatic encryption. @@ -105,16 +111,16 @@ func (mc *mcryptClient) markCommand(ctx context.Context, dbName string, cmd bson } // connect connects the underlying Client instance. This must be called before performing any mark operations. -func (mc *mcryptClient) connect(ctx context.Context) error { +func (mc *mongocryptdClient) connect(ctx context.Context) error { return mc.client.Connect(ctx) } // disconnect disconnects the underlying Client instance. This should be called after all operations have completed. -func (mc *mcryptClient) disconnect(ctx context.Context) error { +func (mc *mongocryptdClient) disconnect(ctx context.Context) error { return mc.client.Disconnect(ctx) } -func (mc *mcryptClient) spawnProcess() error { +func (mc *mongocryptdClient) spawnProcess() error { // Ignore gosec warning about subprocess launched with externally-provided path variable. /* #nosec G204 */ cmd := exec.Command(mc.path, mc.spawnArgs...) diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/aggregateoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/aggregateoptions.go index cf0da5fc..983eba24 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/options/aggregateoptions.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/aggregateoptions.go @@ -23,7 +23,7 @@ type AggregateOptions struct { // If true, writes executed as part of the operation will opt out of document-level validation on the server. This // option is valid for MongoDB versions >= 3.2 and is ignored for previous server versions. The default value is - // false. See https://docs.mongodb.com/manual/core/schema-validation/ for more information about document + // false. See https://www.mongodb.com/docs/manual/core/schema-validation/ for more information about document // validation. BypassDocumentValidation *bool @@ -34,6 +34,10 @@ type AggregateOptions struct { // The maximum amount of time that the query can run on the server. The default value is nil, meaning that there // is no time limit for query execution. + // + // Deprecated: This option is deprecated and will eventually be removed in version 2.0 of the driver. The more general + // Timeout option should be used in its place to control the amount of time that the Aggregate operation can run before + // returning an error. MaxTime is still usable through the deprecated setter. MaxTime *time.Duration // The maximum amount of time that the server should wait for new documents to satisfy a tailable cursor query. @@ -41,7 +45,7 @@ type AggregateOptions struct { MaxAwaitTime *time.Duration // A string that will be included in server logs, profiling logs, and currentOp queries to help trace the operation. - // The default is the empty string, which means that no comment will be included in the logs. 
+ // The default is nil, which means that no comment will be included in the logs. Comment *string // The index to use for the aggregation. This should either be the index name as a string or the index specification @@ -91,6 +95,10 @@ func (ao *AggregateOptions) SetCollation(c *Collation) *AggregateOptions { } // SetMaxTime sets the value for the MaxTime field. +// +// Deprecated: This option is deprecated and will eventually be removed in version 2.0 of the driver. +// The more general Timeout option should be used in its place to control the amount of time that the +// Aggregate operation can run before returning an error. func (ao *AggregateOptions) SetMaxTime(d time.Duration) *AggregateOptions { ao.MaxTime = &d return ao diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/autoencryptionoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/autoencryptionoptions.go index 89c3c05f..375d8999 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/options/autoencryptionoptions.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/autoencryptionoptions.go @@ -32,6 +32,8 @@ type AutoEncryptionOptions struct { BypassAutoEncryption *bool ExtraOptions map[string]interface{} TLSConfig map[string]*tls.Config + EncryptedFieldsMap map[string]interface{} + BypassQueryAnalysis *bool } // AutoEncryption creates a new AutoEncryptionOptions configured with default values. @@ -90,7 +92,35 @@ func (a *AutoEncryptionOptions) SetBypassAutoEncryption(bypass bool) *AutoEncryp return a } -// SetExtraOptions specifies a map of options to configure the mongocryptd process. +// SetExtraOptions specifies a map of options to configure the mongocryptd process or mongo_crypt shared library. +// +// Supported Extra Options +// +// "mongocryptdURI" - The mongocryptd URI. Allows setting a custom URI used to communicate with the +// mongocryptd process. The default is "mongodb://localhost:27020", which works with the default +// mongocryptd process spawned by the Client. Must be a string. +// +// "mongocryptdBypassSpawn" - If set to true, the Client will not attempt to spawn a mongocryptd +// process. Must be a bool. +// +// "mongocryptdSpawnPath" - The path used when spawning mongocryptd. +// Defaults to empty string and spawns mongocryptd from system path. Must be a string. +// +// "mongocryptdSpawnArgs" - Command line arguments passed when spawning mongocryptd. +// Defaults to ["--idleShutdownTimeoutSecs=60"]. Must be an array of strings. +// +// "cryptSharedLibRequired" - If set to true, Client creation will return an error if the +// crypt_shared library is not loaded. If unset or set to false, Client creation will not return an +// error if the crypt_shared library is not loaded. The default is unset. Must be a bool. +// +// "cryptSharedLibPath" - The crypt_shared library override path. This must be the path to the +// crypt_shared dynamic library file (for example, a .so, .dll, or .dylib file), not the directory +// that contains it. If the override path is a relative path, it will be resolved relative to the +// working directory of the process. If the override path is a relative path and the first path +// component is the literal string "$ORIGIN", the "$ORIGIN" component will be replaced by the +// absolute path to the directory containing the linked libmongocrypt library. Setting an override +// path disables the default system library search path. If an override path is specified but the +// crypt_shared library cannot be loaded, Client creation will return an error. Must be a string. 
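To make the supported keys above concrete, a sketch of passing crypt_shared-related extra options follows; the library path is a hypothetical placeholder, the all-zero local master key is for illustration only, and libmongocrypt plus the shared library are assumed to be installed:

    package main

    import (
        "context"
        "log"

        "go.mongodb.org/mongo-driver/mongo"
        "go.mongodb.org/mongo-driver/mongo/options"
    )

    func main() {
        // All-zero 96-byte local master key: illustration only, never production.
        localMasterKey := make([]byte, 96)
        kmsProviders := map[string]map[string]interface{}{
            "local": {"key": localMasterKey},
        }

        aeo := options.AutoEncryption().
            SetKeyVaultNamespace("encryption.__keyVault").
            SetKmsProviders(kmsProviders).
            SetExtraOptions(map[string]interface{}{
                "cryptSharedLibPath":     "/opt/mongodb/lib/mongo_crypt_v1.so", // hypothetical path
                "cryptSharedLibRequired": true,                                 // fail Client creation if it cannot load
            })

        client, err := mongo.Connect(context.Background(),
            options.Client().ApplyURI("mongodb://localhost:27017").SetAutoEncryptionOptions(aeo))
        if err != nil {
            log.Fatal(err)
        }
        defer client.Disconnect(context.Background())
    }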
func (a *AutoEncryptionOptions) SetExtraOptions(extraOpts map[string]interface{}) *AutoEncryptionOptions { a.ExtraOptions = extraOpts return a @@ -113,6 +143,22 @@ func (a *AutoEncryptionOptions) SetTLSConfig(tlsOpts map[string]*tls.Config) *Au return a } +// SetEncryptedFieldsMap specifies a map from namespace to local EncryptedFieldsMap document. +// EncryptedFieldsMap is used for Queryable Encryption. +// Queryable Encryption is in Public Technical Preview. Queryable Encryption should not be used in production and is subject to backwards breaking changes. +func (a *AutoEncryptionOptions) SetEncryptedFieldsMap(ef map[string]interface{}) *AutoEncryptionOptions { + a.EncryptedFieldsMap = ef + return a +} + +// SetBypassQueryAnalysis specifies whether or not query analysis should be used for automatic encryption. +// Use this option when using explicit encryption with Queryable Encryption. +// Queryable Encryption is in Public Technical Preview. Queryable Encryption should not be used in production and is subject to backwards breaking changes. +func (a *AutoEncryptionOptions) SetBypassQueryAnalysis(bypass bool) *AutoEncryptionOptions { + a.BypassQueryAnalysis = &bypass + return a +} + // MergeAutoEncryptionOptions combines the argued AutoEncryptionOptions in a last-one wins fashion. func MergeAutoEncryptionOptions(opts ...*AutoEncryptionOptions) *AutoEncryptionOptions { aeo := AutoEncryption() @@ -142,6 +188,12 @@ func MergeAutoEncryptionOptions(opts ...*AutoEncryptionOptions) *AutoEncryptionO if opt.TLSConfig != nil { aeo.TLSConfig = opt.TLSConfig } + if opt.EncryptedFieldsMap != nil { + aeo.EncryptedFieldsMap = opt.EncryptedFieldsMap + } + if opt.BypassQueryAnalysis != nil { + aeo.BypassQueryAnalysis = opt.BypassQueryAnalysis + } } return aeo diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/bulkwriteoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/bulkwriteoptions.go index 2786ab2c..0c36d0b7 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/options/bulkwriteoptions.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/bulkwriteoptions.go @@ -13,10 +13,14 @@ var DefaultOrdered = true type BulkWriteOptions struct { // If true, writes executed as part of the operation will opt out of document-level validation on the server. This // option is valid for MongoDB versions >= 3.2 and is ignored for previous server versions. The default value is - // false. See https://docs.mongodb.com/manual/core/schema-validation/ for more information about document + // false. See https://www.mongodb.com/docs/manual/core/schema-validation/ for more information about document // validation. BypassDocumentValidation *bool + // A string or document that will be included in server logs, profiling logs, and currentOp queries to help trace + // the operation. The default value is nil, which means that no comment will be included in the logs. + Comment interface{} + // If true, no writes will be executed after one fails. The default value is true. Ordered *bool @@ -34,6 +38,12 @@ func BulkWrite() *BulkWriteOptions { } } +// SetComment sets the value for the Comment field. +func (b *BulkWriteOptions) SetComment(comment interface{}) *BulkWriteOptions { + b.Comment = comment + return b +} + // SetOrdered sets the value for the Ordered field. 
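Stepping back to the two setters just added: below is a sketch of how SetEncryptedFieldsMap feeds the Database.CreateCollection flow earlier in this patch. The namespace and the empty encryptedFields document are placeholders; real deployments define fields per the Queryable Encryption spec, which requires a MongoDB 6.0 Enterprise or Atlas deployment:

    package main

    import (
        "context"
        "log"

        "go.mongodb.org/mongo-driver/bson"
        "go.mongodb.org/mongo-driver/mongo"
        "go.mongodb.org/mongo-driver/mongo/options"
    )

    func main() {
        localMasterKey := make([]byte, 96) // illustration only
        kmsProviders := map[string]map[string]interface{}{"local": {"key": localMasterKey}}

        // Namespace -> encryptedFields document. The empty "fields" array is a
        // placeholder; real definitions come from the Queryable Encryption spec.
        efMap := map[string]interface{}{
            "test.coll": bson.M{"fields": bson.A{}},
        }

        aeo := options.AutoEncryption().
            SetKeyVaultNamespace("encryption.__keyVault").
            SetKmsProviders(kmsProviders).
            SetEncryptedFieldsMap(efMap)

        client, err := mongo.Connect(context.Background(),
            options.Client().ApplyURI("mongodb://localhost:27017").SetAutoEncryptionOptions(aeo))
        if err != nil {
            log.Fatal(err)
        }
        defer client.Disconnect(context.Background())

        // CreateCollection("coll") finds the "test.coll" entry via the client
        // EncryptedFieldsMap and creates the auxiliary state collections alongside
        // the data collection, per the CreateCollection flow earlier in this patch.
        if err := client.Database("test").CreateCollection(context.Background(), "coll"); err != nil {
            log.Fatal(err)
        }
    }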
func (b *BulkWriteOptions) SetOrdered(ordered bool) *BulkWriteOptions { b.Ordered = &ordered @@ -63,6 +73,9 @@ func MergeBulkWriteOptions(opts ...*BulkWriteOptions) *BulkWriteOptions { if opt == nil { continue } + if opt.Comment != nil { + b.Comment = opt.Comment + } if opt.Ordered != nil { b.Ordered = opt.Ordered } diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/changestreamoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/changestreamoptions.go index eb9b0643..862abcd3 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/options/changestreamoptions.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/changestreamoptions.go @@ -23,11 +23,18 @@ type ChangeStreamOptions struct { // default value is nil, which means the default collation of the collection will be used. Collation *Collation - // Specifies whether the updated document should be returned in change notifications for update operations along - // with the deltas describing the changes made to the document. The default is options.Default, which means that - // the updated document will not be included in the change notification. + // A string that will be included in server logs, profiling logs, and currentOp queries to help trace the operation. + // The default is nil, which means that no comment will be included in the logs. + Comment *string + + // Specifies how the updated document should be returned in change notifications for update operations. The default + // is options.Default, which means that only partial update deltas will be included in the change notification. FullDocument *FullDocument + // Specifies how the pre-update document should be returned in change notifications for update operations. The default + // is options.Off, which means that the pre-update document will not be included in the change notification. + FullDocumentBeforeChange *FullDocument + // The maximum amount of time that the server should wait for new documents to satisfy a tailable cursor query. MaxAwaitTime *time.Duration @@ -36,6 +43,11 @@ type ChangeStreamOptions struct { // StartAfter must not be set. ResumeAfter interface{} + // ShowExpandedEvents specifies whether the server will return an expanded list of change stream events. Additional + // events include: createIndexes, dropIndexes, modify, create, shardCollection, reshardCollection and + // refineCollectionShardKey. This option is only valid for MongoDB versions >= 6.0. + ShowExpandedEvents *bool + // If specified, the change stream will only return changes that occurred at or after the given timestamp. This // option is only valid for MongoDB versions >= 4.0. If this is specified, ResumeAfter and StartAfter must not be // set. @@ -78,12 +90,24 @@ func (cso *ChangeStreamOptions) SetCollation(c Collation) *ChangeStreamOptions { return cso } +// SetComment sets the value for the Comment field. +func (cso *ChangeStreamOptions) SetComment(comment string) *ChangeStreamOptions { + cso.Comment = &comment + return cso +} + // SetFullDocument sets the value for the FullDocument field. func (cso *ChangeStreamOptions) SetFullDocument(fd FullDocument) *ChangeStreamOptions { cso.FullDocument = &fd return cso } +// SetFullDocumentBeforeChange sets the value for the FullDocumentBeforeChange field. +func (cso *ChangeStreamOptions) SetFullDocumentBeforeChange(fdbc FullDocument) *ChangeStreamOptions { + cso.FullDocumentBeforeChange = &fdbc + return cso +} + // SetMaxAwaitTime sets the value for the MaxAwaitTime field. 
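The new change-stream options compose as in the sketch below, which assumes a connected *mongo.Database and the usual driver imports; ShowExpandedEvents needs MongoDB >= 6.0, and pre-images must be enabled on the collection for WhenAvailable to return anything:

    // Sketch only: error handling and stream iteration omitted.
    func watchWithNewOptions(ctx context.Context, db *mongo.Database) (*mongo.ChangeStream, error) {
        csOpts := options.ChangeStream().
            SetComment("billing-audit").                        // traceable in logs and currentOp
            SetFullDocument(options.UpdateLookup).              // post-image for updates
            SetFullDocumentBeforeChange(options.WhenAvailable). // pre-image when recorded
            SetShowExpandedEvents(true)                         // DDL events, MongoDB >= 6.0
        return db.Watch(ctx, mongo.Pipeline{}, csOpts)
    }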
func (cso *ChangeStreamOptions) SetMaxAwaitTime(d time.Duration) *ChangeStreamOptions { cso.MaxAwaitTime = &d @@ -96,6 +120,12 @@ func (cso *ChangeStreamOptions) SetResumeAfter(rt interface{}) *ChangeStreamOpti return cso } +// SetShowExpandedEvents sets the value for the ShowExpandedEvents field. +func (cso *ChangeStreamOptions) SetShowExpandedEvents(see bool) *ChangeStreamOptions { + cso.ShowExpandedEvents = &see + return cso +} + // SetStartAtOperationTime sets the value for the StartAtOperationTime field. func (cso *ChangeStreamOptions) SetStartAtOperationTime(t *primitive.Timestamp) *ChangeStreamOptions { cso.StartAtOperationTime = t @@ -139,15 +169,24 @@ func MergeChangeStreamOptions(opts ...*ChangeStreamOptions) *ChangeStreamOptions if cso.Collation != nil { csOpts.Collation = cso.Collation } + if cso.Comment != nil { + csOpts.Comment = cso.Comment + } if cso.FullDocument != nil { csOpts.FullDocument = cso.FullDocument } + if cso.FullDocumentBeforeChange != nil { + csOpts.FullDocumentBeforeChange = cso.FullDocumentBeforeChange + } if cso.MaxAwaitTime != nil { csOpts.MaxAwaitTime = cso.MaxAwaitTime } if cso.ResumeAfter != nil { csOpts.ResumeAfter = cso.ResumeAfter } + if cso.ShowExpandedEvents != nil { + csOpts.ShowExpandedEvents = cso.ShowExpandedEvents + } if cso.StartAtOperationTime != nil { csOpts.StartAtOperationTime = cso.StartAtOperationTime } diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/clientoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/clientoptions.go index 115cc642..05f974f5 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/options/clientoptions.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/clientoptions.go @@ -45,7 +45,7 @@ type ContextDialer interface { // AuthMechanism: the mechanism to use for authentication. Supported values include "SCRAM-SHA-256", "SCRAM-SHA-1", // "MONGODB-CR", "PLAIN", "GSSAPI", "MONGODB-X509", and "MONGODB-AWS". This can also be set through the "authMechanism" // URI option. (e.g. "authMechanism=PLAIN"). For more information, see -// https://docs.mongodb.com/manual/core/authentication-mechanisms/. +// https://www.mongodb.com/docs/manual/core/authentication-mechanisms/. // // AuthMechanismProperties can be used to specify additional configuration options for certain mechanisms. They can also // be set through the "authMechanismProperties" URI option @@ -121,9 +121,9 @@ type ClientOptions struct { RetryWrites *bool ServerAPIOptions *ServerAPIOptions ServerSelectionTimeout *time.Duration - SocketTimeout *time.Duration SRVMaxHosts *int SRVServiceName *string + Timeout *time.Duration TLSConfig *tls.Config WriteConcern *writeconcern.WriteConcern ZlibLevel *int @@ -151,6 +151,13 @@ type ClientOptions struct { // Deprecated: This option is for internal use only and should not be set. It may be changed or removed in any // release. Deployment driver.Deployment + + // SocketTimeout specifies the timeout to be used for the Client's socket reads and writes. + // + // Deprecated: This option is deprecated and will eventually be removed in version 2.0 of the driver. The more general + // Timeout option should be used in its place to control the amount of time that a single operation can run on the Client + // before returning an error. SocketTimeout is still usable through the deprecated setter. + SocketTimeout *time.Duration } // Client creates a new ClientOptions instance. @@ -160,57 +167,58 @@ func Client() *ClientOptions { // Validate validates the client options.
This method will return the first error found. func (c *ClientOptions) Validate() error { - c.validateAndSetError() - return c.err -} - -func (c *ClientOptions) validateAndSetError() { if c.err != nil { - return + return c.err } + c.err = c.validate() + return c.err +} +func (c *ClientOptions) validate() error { // Direct connections cannot be made if multiple hosts are specified or an SRV URI is used. if c.Direct != nil && *c.Direct { if len(c.Hosts) > 1 { - c.err = errors.New("a direct connection cannot be made if multiple hosts are specified") - return + return errors.New("a direct connection cannot be made if multiple hosts are specified") } if c.cs != nil && c.cs.Scheme == connstring.SchemeMongoDBSRV { - c.err = errors.New("a direct connection cannot be made if an SRV URI is used") - return + return errors.New("a direct connection cannot be made if an SRV URI is used") } } + if c.MaxPoolSize != nil && c.MinPoolSize != nil && *c.MaxPoolSize != 0 && *c.MinPoolSize > *c.MaxPoolSize { + return fmt.Errorf("minPoolSize must be less than or equal to maxPoolSize, got minPoolSize=%d maxPoolSize=%d", *c.MinPoolSize, *c.MaxPoolSize) + } + // verify server API version if ServerAPIOptions are passed in. if c.ServerAPIOptions != nil { - c.err = c.ServerAPIOptions.ServerAPIVersion.Validate() + if err := c.ServerAPIOptions.ServerAPIVersion.Validate(); err != nil { + return err + } } // Validation for load-balanced mode. if c.LoadBalanced != nil && *c.LoadBalanced { if len(c.Hosts) > 1 { - c.err = internal.ErrLoadBalancedWithMultipleHosts - return + return internal.ErrLoadBalancedWithMultipleHosts } if c.ReplicaSet != nil { - c.err = internal.ErrLoadBalancedWithReplicaSet - return + return internal.ErrLoadBalancedWithReplicaSet } if c.Direct != nil { - c.err = internal.ErrLoadBalancedWithDirectConnection - return + return internal.ErrLoadBalancedWithDirectConnection } } // Validation for srvMaxHosts. if c.SRVMaxHosts != nil && *c.SRVMaxHosts > 0 { if c.ReplicaSet != nil { - c.err = internal.ErrSRVMaxHostsWithReplicaSet + return internal.ErrSRVMaxHostsWithReplicaSet } if c.LoadBalanced != nil && *c.LoadBalanced { - c.err = internal.ErrSRVMaxHostsWithLoadBalanced + return internal.ErrSRVMaxHostsWithLoadBalanced } } + return nil } // GetURI returns the original URI used to configure the ClientOptions instance. If ApplyURI was not called during @@ -231,7 +239,7 @@ func (c *ClientOptions) GetURI() string { // If the URI format is incorrect or there are conflicting options specified in the URI an error will be recorded and // can be retrieved by calling Validate. // -// For more information about the URI format, see https://docs.mongodb.com/manual/reference/connection-string/. See +// For more information about the URI format, see https://www.mongodb.com/docs/manual/reference/connection-string/. See // mongo.Connect documentation for examples of using URIs for different Client configurations. func (c *ClientOptions) ApplyURI(uri string) *ClientOptions { if c.err != nil { @@ -445,6 +453,10 @@ func (c *ClientOptions) ApplyURI(uri string) *ClientOptions { c.DisableOCSPEndpointCheck = &cs.SSLDisableOCSPEndpointCheck } + if cs.TimeoutSet { + c.Timeout = &cs.Timeout + } + return c } @@ -470,12 +482,12 @@ func (c *ClientOptions) SetAuth(auth Credential) *ClientOptions { // // 2. "zlib" - requires server version >= 3.6 // -// 3. "zstd" - requires server version >= 4.2, and driver version >= 1.2.0 with cgo support enabled or driver version >= 1.3.0 -// without cgo +// 3. 
"zstd" - requires server version >= 4.2, and driver version >= 1.2.0 with cgo support enabled or driver +// version >= 1.3.0 without cgo. // // If this option is specified, the driver will perform a negotiation with the server to determine a common list of of // compressors and will use the first one in that list when performing operations. See -// https://docs.mongodb.com/manual/reference/program/mongod/#cmdoption-mongod-networkmessagecompressors for more +// https://www.mongodb.com/docs/manual/reference/program/mongod/#cmdoption-mongod-networkmessagecompressors for more // information about configuring compression on the server and the server-side defaults. // // This can also be set through the "compressors" URI option (e.g. "compressors=zstd,zlib,snappy"). The default is @@ -636,7 +648,7 @@ func (c *ClientOptions) SetReadConcern(rc *readconcern.ReadConcern) *ClientOptio // 3. "maxStalenessSeconds" (or "maxStaleness"): Specify a maximum replication lag for reads from secondaries in a // replica set (e.g. "maxStalenessSeconds=10"). // -// The default is readpref.Primary(). See https://docs.mongodb.com/manual/core/read-preference/#read-preference for +// The default is readpref.Primary(). See https://www.mongodb.com/docs/manual/core/read-preference/#read-preference for // more information about read preferences. func (c *ClientOptions) SetReadPreference(rp *readpref.ReadPref) *ClientOptions { c.ReadPreference = rp @@ -702,11 +714,30 @@ func (c *ClientOptions) SetServerSelectionTimeout(d time.Duration) *ClientOption // SetSocketTimeout specifies how long the driver will wait for a socket read or write to return before returning a // network error. This can also be set through the "socketTimeoutMS" URI option (e.g. "socketTimeoutMS=1000"). The // default value is 0, meaning no timeout is used and socket operations can block indefinitely. +// +// Deprecated: This option is deprecated and will eventually be removed in version 2.0 of the driver. The more general +// Timeout option should be used in its place to control the amount of time that a single operation can run on the Client +// before returning an error. func (c *ClientOptions) SetSocketTimeout(d time.Duration) *ClientOptions { c.SocketTimeout = &d return c } +// SetTimeout specifies the amount of time that a single operation run on this Client can execute before returning an error. +// The deadline of any operation run through the Client will be honored above any Timeout set on the Client; Timeout will only +// be honored if there is no deadline on the operation Context. Timeout can also be set through the "timeoutMS" URI option +// (e.g. "timeoutMS=1000"). The default value is nil, meaning operations do not inherit a timeout from the Client. +// +// If any Timeout is set (even 0) on the Client, the values of other, deprecated timeout-related options will be ignored. +// In particular: ClientOptions.SocketTimeout, WriteConcern.wTimeout, MaxTime on operations, and TransactionOptions.MaxCommitTime. +// +// NOTE(benjirewis): SetTimeout represents unstable, provisional API. The behavior of the driver when a Timeout is specified is +// subject to change. +func (c *ClientOptions) SetTimeout(d time.Duration) *ClientOptions { + c.Timeout = &d + return c +} + // SetTLSConfig specifies a tls.Config instance to use use to configure TLS on all connections created to the cluster. 
// This can also be set through the following URI options: // @@ -920,6 +951,9 @@ func MergeClientOptions(opts ...*ClientOptions) *ClientOptions { if opt.SRVServiceName != nil { c.SRVServiceName = opt.SRVServiceName } + if opt.Timeout != nil { + c.Timeout = opt.Timeout + } if opt.TLSConfig != nil { c.TLSConfig = opt.TLSConfig } @@ -983,7 +1017,9 @@ func addClientCertFromSeparateFiles(cfg *tls.Config, keyFile, certFile, keyPassw return "", err } - data := append(keyData, '\n') + data := make([]byte, 0, len(keyData)+len(certData)+1) + data = append(data, keyData...) + data = append(data, '\n') data = append(data, certData...) return addClientCertFromBytes(cfg, data, keyPassword) } diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/collectionoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/collectionoptions.go index 5c811147..e8b68a27 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/options/collectionoptions.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/collectionoptions.go @@ -15,20 +15,20 @@ import ( // CollectionOptions represents options that can be used to configure a Collection. type CollectionOptions struct { - // The read concern to use for operations executed on the Collection. The default value is nil, which means that - // the read concern of the database used to configure the Collection will be used. + // ReadConcern is the read concern to use for operations executed on the Collection. The default value is nil, which means that + // the read concern of the Database used to configure the Collection will be used. ReadConcern *readconcern.ReadConcern - // The write concern to use for operations executed on the Collection. The default value is nil, which means that - // the write concern of the database used to configure the Collection will be used. + // WriteConcern is the write concern to use for operations executed on the Collection. The default value is nil, which means that + // the write concern of the Database used to configure the Collection will be used. WriteConcern *writeconcern.WriteConcern - // The read preference to use for operations executed on the Collection. The default value is nil, which means that - // the read preference of the database used to configure the Collection will be used. + // ReadPreference is the read preference to use for operations executed on the Collection. The default value is nil, which means that + // the read preference of the Database used to configure the Collection will be used. ReadPreference *readpref.ReadPref - // The BSON registry to marshal and unmarshal documents for operations executed on the Collection. The default value - // is nil, which means that the registry of the database used to configure the Collection will be used. + // Registry is the BSON registry to marshal and unmarshal documents for operations executed on the Collection. The default value + // is nil, which means that the registry of the Database used to configure the Collection will be used. Registry *bsoncodec.Registry } diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/countoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/countoptions.go index 094524c1..06f5dce7 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/options/countoptions.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/countoptions.go @@ -15,6 +15,13 @@ type CountOptions struct { // default value is nil, which means the default collation of the collection will be used. 
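Back in clientoptions.go, the addClientCertFromSeparateFiles hunk replaces data := append(keyData, '\n') with an explicitly sized new slice, presumably to avoid Go's append aliasing: when the first argument has spare capacity, append writes into its backing array, so later appends can scribble over bytes the caller still holds. A standalone illustration:

    package main

    import "fmt"

    func main() {
        buf := make([]byte, 3, 8) // length 3, spare capacity
        copy(buf, "key")

        alias := append(buf, '\n') // fits in buf's backing array, so no copy happens
        alias[0] = 'X'             // writes through to buf as well
        fmt.Println(string(buf))   // "Xey"

        // The patched pattern: size a fresh slice so append can never alias its input.
        safe := make([]byte, 0, len(buf)+1)
        safe = append(safe, buf...)
        safe = append(safe, '\n')
        safe[0] = 'Y'
        fmt.Println(string(buf)) // still "Xey"; buf is unaffected this time
    }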
Collation *Collation + // TODO(GODRIVER-2386): CountOptions executor uses aggregation under the hood, which means this type has to be + // TODO a string for now. This can be replaced with `Comment interface{}` once 2386 is implemented. + + // A string or document that will be included in server logs, profiling logs, and currentOp queries to help trace + // the operation. The default is nil, which means that no comment will be included in the logs. + Comment *string + // The index to use for the aggregation. This should either be the index name as a string or the index specification // as a document. The driver will return an error if the hint parameter is a multi-key map. The default value is nil, // which means that no hint will be sent. @@ -26,6 +33,10 @@ type CountOptions struct { // The maximum amount of time that the query can run on the server. The default value is nil, meaning that there is // no time limit for query execution. + // + // Deprecated: This option is deprecated and will eventually be removed in version 2.0 of the driver. The more general + // Timeout option should be used in its place to control the amount of time that the count operation can run before + // returning an error. MaxTime is still usable through the deprecated setter. MaxTime *time.Duration // The number of documents to skip before counting. The default value is 0. @@ -43,6 +54,12 @@ func (co *CountOptions) SetCollation(c *Collation) *CountOptions { return co } +// SetComment sets the value for the Comment field. +func (co *CountOptions) SetComment(c string) *CountOptions { + co.Comment = &c + return co +} + // SetHint sets the value for the Hint field. func (co *CountOptions) SetHint(h interface{}) *CountOptions { co.Hint = h @@ -56,6 +73,10 @@ func (co *CountOptions) SetLimit(i int64) *CountOptions { } // SetMaxTime sets the value for the MaxTime field. +// +// Deprecated: This option is deprecated and will eventually be removed in version 2.0 of the driver. The more general +// Timeout option should be used in its place to control the amount of time that the count operation can run before +// returning an error. func (co *CountOptions) SetMaxTime(d time.Duration) *CountOptions { co.MaxTime = &d return co @@ -77,6 +98,9 @@ func MergeCountOptions(opts ...*CountOptions) *CountOptions { if co.Collation != nil { countOpts.Collation = co.Collation } + if co.Comment != nil { + countOpts.Comment = co.Comment + } if co.Hint != nil { countOpts.Hint = co.Hint } diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/createcollectionoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/createcollectionoptions.go index 130c8e75..6fc7d066 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/options/createcollectionoptions.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/createcollectionoptions.go @@ -67,7 +67,7 @@ func (tso *TimeSeriesOptions) SetGranularity(granularity string) *TimeSeriesOpti // CreateCollectionOptions represents options that can be used to configure a CreateCollection operation. type CreateCollectionOptions struct { - // Specifies if the collection is capped (see https://docs.mongodb.com/manual/core/capped-collections/). If true, + // Specifies if the collection is capped (see https://www.mongodb.com/docs/manual/core/capped-collections/). If true, // the SizeInBytes option must also be specified. The default value is false. 
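A short sketch of the new CountOptions.Comment field in use, assuming a context and a connected *mongo.Collection with the usual driver imports; per the deprecation notes above, a client-level Timeout is the forward-looking replacement for SetMaxTime:

    // Sketch only: field values and the comment string are illustrative.
    func activeCount(ctx context.Context, coll *mongo.Collection) (int64, error) {
        opts := options.Count().
            SetComment("nightly-report"). // surfaces in server logs, profiling, and currentOp
            SetLimit(1000)
        return coll.CountDocuments(ctx, bson.D{{Key: "status", Value: "active"}}, opts)
    }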
Capped *bool @@ -75,6 +75,12 @@ type CreateCollectionOptions struct { // For previous server versions, the driver will return an error if this option is used. The default value is nil. Collation *Collation + // Specifies how change streams opened against the collection can return pre- and post-images of updated + // documents. The value must be a document in the form {